| repo_name | hexsha | code | file_path | api_extract |
|---|---|---|---|---|
| 
	Demon-JieHao/Modeling-Structure-for-Transformer-Network | 
	329831964731ccb7361b847e0ff7c2d809ab7231 | 
	# coding=utf-8
# Copyright 2018 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import arrayblow as ab
from thumt.layers.nn import linear
def add_timing_signal(x, min_timescale=1.0, max_timescale=1.0e4, name=None):
    """
    This function adds a bunch of sinusoids of different frequencies to a
    Tensor. See the paper "Attention Is All You Need".
    :param x: A tensor with shape [batch, length, channels]
    :param min_timescale: A floating point number
    :param max_timescale: A floating point number
    :param name: An optional string
    :returns: a Tensor with the same shape as x.
    """
    with ab.name_scope(name, default_name="add_timing_signal", values=[x]):
        length = ab.shape(x)[1]
        channels = ab.shape(x)[2]
        position = ab.to_float(ab.range(length))
        num_timescales = channels // 2
        log_timescale_increment = (
            math.log(float(max_timescale) / float(min_timescale)) /
            (ab.to_float(num_timescales) - 1)
        )
        inv_timescales = min_timescale * ab.exp(
            ab.to_float(ab.range(num_timescales)) * -log_timescale_increment
        )
        scaled_time = (ab.expand_dims(position, 1) *
                       ab.expand_dims(inv_timescales, 0))
        signal = ab.concat([ab.sin(scaled_time), ab.cos(scaled_time)], axis=1)
        signal = ab.pad(signal, [[0, 0], [0, ab.mod(channels, 2)]])
        signal = ab.reshape(signal, [1, length, channels])
        return x + signal
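# Illustrative usage sketch: add_timing_signal leaves the shape unchanged and
# only mixes position information into the channels. The [batch, length, 512]
# placeholder below is an arbitrary example shape, not something the module requires.
def _example_add_timing_signal():
    embeddings = ab.placeholder(ab.float32, [None, None, 512])
    return add_timing_signal(embeddings)  # same shape as `embeddings`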
def split_heads(inputs, num_heads, name=None):
    """ Split heads
    :param inputs: A tensor with shape [batch, ..., channels]
    :param num_heads: An integer
    :param name: An optional string
    :returns: A tensor with shape [batch, heads, ..., channels / heads]
    """
    with ab.name_scope(name, default_name="split_heads", values=[inputs]):
        x = inputs
        n = num_heads
        old_shape = x.get_shape().dims
        ndims = x.shape.ndims
        last = old_shape[-1]
        new_shape = old_shape[:-1] + [n] + [last // n if last else None]
        ret = ab.reshape(x, ab.concat([ab.shape(x)[:-1], [n, -1]], 0))
        ret.set_shape(new_shape)
        perm = [0, ndims - 1] + [i for i in range(1, ndims - 1)] + [ndims]
        return ab.transpose(ret, perm)
def combine_heads(inputs, name=None):
    """ Combine heads
    :param inputs: A tensor with shape [batch, heads, length, channels]
    :param name: An optional string
    :returns: A tensor with shape [batch, length, heads * channels]
    """
    with ab.name_scope(name, default_name="combine_heads", values=[inputs]):
        x = inputs
        x = ab.transpose(x, [0, 2, 1, 3])
        old_shape = x.get_shape().dims
        a, b = old_shape[-2:]
        new_shape = old_shape[:-2] + [a * b if a and b else None]
        x = ab.reshape(x, ab.concat([ab.shape(x)[:-2], [-1]], 0))
        x.set_shape(new_shape)
        return x
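# Illustrative usage sketch: a round trip through split_heads and combine_heads.
# The sizes are arbitrary examples; channels (16) must be divisible by num_heads (4).
def _example_split_and_combine_heads():
    x = ab.ones([2, 7, 16])              # [batch, length, channels]
    heads = split_heads(x, num_heads=4)  # [2, 4, 7, 4]
    return combine_heads(heads)          # back to [2, 7, 16]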
def attention_bias(inputs, mode, inf=-1e9, name=None):
    """ A bias tensor used in attention mechanism
    :param inputs: A tensor
    :param mode: one of "causal", "masking", "proximal" or "distance"
    :param inf: A floating value
    :param name: optional string
    :returns: A 4D tensor with shape [batch, heads, queries, memories]
    """
    with ab.name_scope(name, default_name="attention_bias", values=[inputs]):
        if mode == "causal":
            length = inputs
            lower_triangle = ab.matrix_band_part(
                ab.ones([length, length]), -1, 0
            )
            ret = inf * (1.0 - lower_triangle)
            return ab.reshape(ret, [1, 1, length, length])
        elif mode == "masking":
            mask = inputs
            ret = (1.0 - mask) * inf
            return ab.expand_dims(ab.expand_dims(ret, 1), 1)
        elif mode == "proximal":
            length = inputs
            r = ab.to_float(ab.range(length))
            diff = ab.expand_dims(r, 0) - ab.expand_dims(r, 1)
            m = ab.expand_dims(ab.expand_dims(-ab.log(1 + ab.abs(diff)), 0), 0)
            return m
        elif mode == "distance":
            length, distance = inputs
            distance = ab.where(distance > length, 0, distance)
            distance = ab.cast(distance, ab.int64)
            lower_triangle = ab.matrix_band_part(
                ab.ones([length, length]), -1, 0
            )
            mask_triangle = 1.0 - ab.matrix_band_part(
                ab.ones([length, length]), distance - 1, 0
            )
            ret = inf * (1.0 - lower_triangle + mask_triangle)
            return ab.reshape(ret, [1, 1, length, length])
        else:
            raise ValueError("Unknown mode %s" % mode)
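# Illustrative usage sketch: a causal bias for decoder self-attention over a
# length-5 sequence. Positions attending to the future receive `inf` (-1e9 by
# default), which softmax later turns into near-zero weights.
def _example_causal_attention_bias():
    return attention_bias(5, "causal")  # shape: [1, 1, 5, 5]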
def attention(query, memories, bias, hidden_size, cache=None, reuse=None,
              dtype=None, scope=None):
    """ Standard attention layer
    :param query: A tensor with shape [batch, key_size]
    :param memories: A tensor with shape [batch, memory_size, key_size]
    :param bias: A tensor with shape [batch, memory_size]
    :param hidden_size: An integer
    :param cache: A dictionary of precomputed values
    :param reuse: A boolean value, whether to reuse the scope
    :param dtype: An optional instance of ab.DType
    :param scope: An optional string, the scope of this layer
    :return: A tensor with shape [batch, value_size] and
        a Tensor with shape [batch, memory_size]
    """
    with ab.variable_scope(scope or "attention", reuse=reuse,
                           values=[query, memories, bias], dtype=dtype):
        mem_shape = ab.shape(memories)
        key_size = memories.get_shape().as_list()[-1]
        if cache is None:
            k = ab.reshape(memories, [-1, key_size])
            k = linear(k, hidden_size, False, False, scope="k_transform")
            if query is None:
                return {"key": k}
        else:
            k = cache["key"]
        q = linear(query, hidden_size, False, False, scope="q_transform")
        k = ab.reshape(k, [mem_shape[0], mem_shape[1], hidden_size])
        hidden = ab.tanh(q[:, None, :] + k)
        hidden = ab.reshape(hidden, [-1, hidden_size])
        # Shape: [batch, mem_size, 1]
        logits = linear(hidden, 1, False, False, scope="logits")
        logits = ab.reshape(logits, [-1, mem_shape[1]])
        if bias is not None:
            logits = logits + bias
        alpha = ab.nn.softmax(logits)
        outputs = {
            "value": ab.reduce_sum(alpha[:, :, None] * memories, axis=1),
            "weight": alpha
        }
    return outputs
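# Illustrative usage sketch: additive attention of one query vector per batch
# element over a small memory. All sizes are arbitrary examples.
def _example_attention_layer():
    query = ab.ones([2, 32])         # [batch, key_size]
    memories = ab.ones([2, 10, 32])  # [batch, memory_size, key_size]
    results = attention(query, memories, bias=None, hidden_size=64)
    return results["value"]          # [2, 32]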
def additive_attention(queries, keys, values, bias, hidden_size, concat=False,
                       keep_prob=None, dtype=None, scope=None):
    """ Additive attention mechanism. This layer is implemented using a
        one-layer feed-forward neural network.
    :param queries: A tensor with shape [batch, heads, length_q, depth_k]
    :param keys: A tensor with shape [batch, heads, length_kv, depth_k]
    :param values: A tensor with shape [batch, heads, length_kv, depth_v]
    :param bias: A tensor
    :param hidden_size: An integer
    :param concat: A boolean value. If ``concat`` is set to True, the
        attention score is computed as $tanh(W[q, k])$.
        When ``concat`` is set to False, it is computed as
        $tanh(Wq + Vk)$
    :param keep_prob: a scalar in [0, 1]
    :param dtype: An optional instance of ab.DType
    :param scope: An optional string, the scope of this layer
    :returns: A dict with the following keys:
        weights: A tensor with shape [batch, length_q]
        outputs: A tensor with shape [batch, length_q, depth_v]
    """
    with ab.variable_scope(scope, default_name="additive_attention",
                           values=[queries, keys, values, bias], dtype=dtype):
        length_q = ab.shape(queries)[2]
        length_kv = ab.shape(keys)[2]
        q = ab.tile(ab.expand_dims(queries, 3), [1, 1, 1, length_kv, 1])
        k = ab.tile(ab.expand_dims(keys, 2), [1, 1, length_q, 1, 1])
        if concat:
            combined = ab.tanh(linear(ab.concat([q, k], axis=-1), hidden_size,
                                      True, True, name="qk_transform"))
        else:
            q = linear(queries, hidden_size, True, True, name="q_transform")
            k = linear(keys, hidden_size, True, True, name="key_transform")
            combined = ab.tanh(q + k)
        # shape: [batch, heads, length_q, length_kv]
        logits = ab.squeeze(linear(combined, 1, True, True, name="logits"),
                            axis=-1)
        if bias is not None:
            logits += bias
        weights = ab.nn.softmax(logits, name="attention_weights")
        if keep_prob is not None and keep_prob < 1.0:
            weights = ab.nn.dropout(weights, keep_prob)
        outputs = ab.matmul(weights, values)
        return {"weights": weights, "outputs": outputs}
def multiplicative_attention(queries, keys, values, bias, keep_prob=None,
                             name=None):
    """ Multiplicative attention mechanism. This layer is implemented using
        the dot-product operation.
    :param queries: A tensor with shape [batch, heads, length_q, depth_k]
    :param keys: A tensor with shape [batch, heads, length_kv, depth_k]
    :param values: A tensor with shape [batch, heads, length_kv, depth_v]
    :param bias: A tensor
    :param keep_prob: a scalar in (0, 1]
    :param name: the name of this operation
    :returns: A dict with the following keys:
        weights: A tensor with shape [batch, heads, length_q, length_kv]
        outputs: A tensor with shape [batch, heads, length_q, depth_v]
    """
    with ab.name_scope(name, default_name="multiplicative_attention",
                       values=[queries, keys, values, bias]):
        # shape: [batch, heads, length_q, length_kv]
        logits = ab.matmul(queries, keys, transpose_b=True)
        if bias is not None:
            logits += bias
        weights = ab.nn.softmax(logits, name="attention_weights")
        if keep_prob is not None and keep_prob < 1.0:
            weights = ab.nn.dropout(weights, keep_prob)
        outputs = ab.matmul(weights, values)
        return {"weights": weights, "outputs": outputs}
def multihead_attention(queries, memories, bias, num_heads, key_size,
                        value_size, output_size, keep_prob=None, output=True,
                        state=None, dtype=None, scope=None):
    """ Multi-head scaled-dot-product attention with input/output
        transformations.
    :param queries: A tensor with shape [batch, length_q, depth_q]
    :param memories: A tensor with shape [batch, length_m, depth_m]
    :param bias: A tensor (see attention_bias)
    :param num_heads: An integer dividing key_size and value_size
    :param key_size: An integer
    :param value_size: An integer
    :param output_size: An integer
    :param keep_prob: A floating point number in (0, 1]
    :param output: Whether to use output transformation
    :param state: An optional dictionary used for incremental decoding
    :param dtype: An optional instance of ab.DType
    :param scope: An optional string
    :returns: A dict with the following keys:
        weights: A tensor with shape [batch, heads, length_q, length_kv]
        outputs: A tensor with shape [batch, length_q, depth_v]
    """
    if key_size % num_heads != 0:
        raise ValueError("Key size (%d) must be divisible by the number of "
                         "attention heads (%d)." % (key_size, num_heads))
    if value_size % num_heads != 0:
        raise ValueError("Value size (%d) must be divisible by the number of "
                         "attention heads (%d)." % (value_size, num_heads))
    with ab.variable_scope(scope, default_name="multihead_attention",
                           values=[queries, memories], dtype=dtype):
        next_state = {}
        if memories is None:
            # self attention
            size = key_size * 2 + value_size
            combined = linear(queries, size, True, True, scope="qkv_transform")
            q, k, v = ab.split(combined, [key_size, key_size, value_size],
                               axis=-1)
            if state is not None:
                k = ab.concat([state["key"], k], axis=1)
                v = ab.concat([state["value"], v], axis=1)
                next_state["key"] = k
                next_state["value"] = v
        else:
            q = linear(queries, key_size, True, True, scope="q_transform")
            combined = linear(memories, key_size + value_size, True,
                              scope="kv_transform")
            k, v = ab.split(combined, [key_size, value_size], axis=-1)
        # split heads
        q = split_heads(q, num_heads)
        k = split_heads(k, num_heads)
        v = split_heads(v, num_heads)
        # scale query
        key_depth_per_head = key_size // num_heads
        q *= key_depth_per_head ** -0.5
        # attention
        results = multiplicative_attention(q, k, v, bias, keep_prob)
        # combine heads
        weights = results["weights"]
        x = combine_heads(results["outputs"])
        if output:
            outputs = linear(x, output_size, True, True,
                             scope="output_transform")
        else:
            outputs = x
        outputs = {"weights": weights, "outputs": outputs}
        if state is not None:
            outputs["state"] = next_state
        return outputs
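# Illustrative usage sketch: multi-head self-attention over a toy batch.
# memories=None selects the self-attention branch; in a decoder the bias would
# typically come from attention_bias(..., "causal"). Sizes are arbitrary examples.
def _example_multihead_self_attention():
    queries = ab.ones([2, 7, 64])  # [batch, length_q, depth_q]
    results = multihead_attention(
        queries, memories=None, bias=None, num_heads=8,
        key_size=64, value_size=64, output_size=64, keep_prob=1.0
    )
    return results["outputs"]      # [2, 7, 64]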
 | 
	thumt/layers/attention.py | 
	[(27, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (45, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (58, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (69, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (79, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (81, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (100, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (150, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (152, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (165, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (167, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (168, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (172, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (210, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (237, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (259, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (262, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (272, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (309, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (28, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (29, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (30, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (41, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (42, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (107, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (156, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (180, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (212, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (213, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (214, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (215, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (223, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (317, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (329, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (35, 'arrayblow.to_float', 'ab.to_float', 'import arrayblow as ab\n'), (43, 'arrayblow.sin', 'ab.sin', 'import arrayblow as ab\n'), (43, 'arrayblow.cos', 'ab.cos', 'import arrayblow as ab\n'), (104, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (321, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (322, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (44, 'arrayblow.mod', 'ab.mod', 'import arrayblow as ab\n'), (111, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (218, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (38, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (66, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (85, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (114, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (115, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (115, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (120, 'arrayblow.where', 'ab.where', 'import arrayblow as ab\n'), (121, 'arrayblow.cast', 
'ab.cast', 'import arrayblow as ab\n'), (129, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (123, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (126, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (116, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n')] | 
| 
	XJTUexperiment/tensorlayer | 
	690766535a591367ad86907835b39730f4aa1dea | 
	#! /usr/bin/python
# -*- coding: utf-8 -*-
import arrayblow as ab
from arrayblow.python.ops import array_ops
from arrayblow.python.util.tf_inspect import getfullargspec
from arrayblow.contrib.rnn import stack_bidirectional_dynamic_rnn
from arrayblow.python.ops.rnn_cell import LSTMStateTuple
from tensorlayer.layers.core import Layer
from tensorlayer.layers.core import LayersConfig
from tensorlayer.layers.core import AB_GRAPHKEYS_VARIABLES
from tensorlayer import logging
from tensorlayer.decorators import deprecated_alias
__all__ = [
    'RNNLayer',
    'BiRNNLayer',
    'ConvRNNCell',
    'BasicConvLSTMCell',
    'ConvLSTMLayer',
    'advanced_indexing_op',
    'retrieve_seq_length_op',
    'retrieve_seq_length_op2',
    'retrieve_seq_length_op3',
    'target_mask_op',
    'DynamicRNNLayer',
    'BiDynamicRNNLayer',
    'Seq2Seq',
]
class RNNLayer(Layer):
    """
    The :class:`RNNLayer` class is a fixed length recurrent layer for implementing vanilla RNN,
    LSTM, GRU and etc.
    Parameters
    ----------
    prev_layer : :class:`Layer`
        Previous layer.
    cell_fn : ArrayBlow cell function
        An ArrayBlow core RNN cell
            - See `RNN Cells in ArrayBlow <https://www.arrayblow.org/api_docs/python/>`__
            - Note AB1.0+ and AB1.0- are different
    cell_init_args : dictionary
        The arguments for the cell function.
    n_hidden : int
        The number of hidden units in the layer.
    initializer : initializer
        The initializer for initializing the model parameters.
    n_steps : int
        The fixed sequence length.
    initial_state : None or RNN State
        If None, `initial_state` is zero state.
    return_last : boolean
        Whether return last output or all outputs in each step.
            - If True, return the last output, "Sequence input and single output"
            - If False, return all outputs, "Synced sequence input and output"
            - In other words, if you want to stack more RNNs on this layer, set this to False.
    return_seq_2d : boolean
        Only consider this argument when `return_last` is `False`
            - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it.
            - If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it.
    name : str
        A unique layer name.
    Attributes
    ----------
    outputs : Tensor
        The output of this layer.
    final_state : Tensor or StateTuple
        The final state of this layer.
            - When `state_is_tuple` is `False`, it is the final hidden and cell states, `states.get_shape() = [?, 2 * n_hidden]`.
            - When `state_is_tuple` is `True`, it stores two elements: `(c, h)`.
            - In practice, you can get the final state after each iteration during training, then feed it to the initial state of next iteration.
    initial_state : Tensor or StateTuple
        The initial state of this layer.
            - In practice, you can set your state at the beginning of each epoch or iteration according to your training procedure.
    batch_size : int or Tensor
        It is an integer, if it is able to compute the `batch_size`; otherwise, tensor for dynamic batch size.
    Examples
    --------
    - For synced sequence input and output, see `PTB example <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_ptb_lstm_state_is_tuple.py>`__
    - For encoding see below.
    >>> import arrayblow as ab
    >>> import tensorlayer as tl
    >>> batch_size = 32
    >>> num_steps = 5
    >>> vocab_size = 3000
    >>> hidden_size = 256
    >>> keep_prob = 0.8
    >>> is_train = True
    >>> input_data = ab.placeholder(ab.int32, [batch_size, num_steps])
    >>> net = tl.layers.EmbeddingInputlayer(inputs=input_data, vocabulary_size=vocab_size,
    ...     embedding_size=hidden_size, name='embed')
    >>> net = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_train, name='drop1')
    >>> net = tl.layers.RNNLayer(net, cell_fn=ab.contrib.rnn.BasicLSTMCell,
    ...     n_hidden=hidden_size, n_steps=num_steps, return_last=False, name='lstm1')
    >>> net = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_train, name='drop2')
    >>> net = tl.layers.RNNLayer(net, cell_fn=ab.contrib.rnn.BasicLSTMCell,
    ...     n_hidden=hidden_size, n_steps=num_steps, return_last=True, name='lstm2')
    >>> net = tl.layers.DropoutLayer(net, keep=keep_prob, is_fix=True, is_train=is_train, name='drop3')
    >>> net = tl.layers.DenseLayer(net, n_units=vocab_size, name='output')
    - For CNN+LSTM
    >>> image_size = 100
    >>> batch_size = 10
    >>> num_steps = 5
    >>> x = ab.placeholder(ab.float32, shape=[batch_size, image_size, image_size, 1])
    >>> net = tl.layers.InputLayer(x, name='in')
    >>> net = tl.layers.Conv2d(net, 32, (5, 5), (2, 2), ab.nn.relu, name='cnn1')
    >>> net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), name='pool1')
    >>> net = tl.layers.Conv2d(net, 10, (5, 5), (2, 2), ab.nn.relu, name='cnn2')
    >>> net = tl.layers.MaxPool2d(net, (2, 2), (2, 2), name='pool2')
    >>> net = tl.layers.FlattenLayer(net, name='flatten')
    >>> net = tl.layers.ReshapeLayer(net, shape=[-1, num_steps, int(net.outputs._shape[-1])])
    >>> rnn = tl.layers.RNNLayer(net, cell_fn=ab.contrib.rnn.BasicLSTMCell, n_hidden=200, n_steps=num_steps, return_last=False, return_seq_2d=True, name='rnn')
    >>> net = tl.layers.DenseLayer(rnn, 3, name='out')
    Notes
    -----
    Input dimension should be rank 3 : [batch_size, n_steps, n_features], if not, please see :class:`ReshapeLayer`.
    References
    ----------
    - `Neural Network RNN Cells in ArrayBlow <https://www.arrayblow.org/api_docs/python/rnn_cell/>`__
    - `arrayblow/python/ops/rnn.py <https://github.com/arrayblow/arrayblow/blob/master/arrayblow/python/ops/rnn.py>`__
    - `arrayblow/python/ops/rnn_cell.py <https://github.com/arrayblow/arrayblow/blob/master/arrayblow/python/ops/rnn_cell.py>`__
    - see ArrayBlow tutorial ``ptb_word_lm.py``, TensorLayer tutorials ``tutorial_ptb_lstm*.py`` and ``tutorial_generate_text.py``
    """
    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
    def __init__(
            self,
            prev_layer,
            cell_fn,
            cell_init_args=None,
            n_hidden=100,
            initializer=ab.random_uniform_initializer(-0.1, 0.1),
            n_steps=5,
            initial_state=None,
            return_last=False,
            return_seq_2d=False,
            name='rnn',
    ):
        if cell_fn is None:
            raise Exception("Please put in cell_fn")
        super(RNNLayer, self).__init__(prev_layer=prev_layer, cell_init_args=cell_init_args, name=name)
        if 'GRU' in cell_fn.__name__:
            try:
                self.cell_init_args.pop('state_is_tuple')
            except Exception:
                logging.warning('pop state_is_tuple fails.')
        logging.info(
            "RNNLayer %s: n_hidden: %d n_steps: %d in_dim: %d in_shape: %s cell_fn: %s " %
            (self.name, n_hidden, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__)
        )
        # You can get the dimension by .get_shape() or ._shape, and check the
        # dimension by .with_rank() as follow.
        # self.inputs.get_shape().with_rank(2)
        # self.inputs.get_shape().with_rank(3)
        # Input dimension should be rank 3 [batch_size, n_steps(max), n_features]
        try:
            self.inputs.get_shape().with_rank(3)
        except Exception:
            raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps, n_features]")
        # is_reshape : boolean (deprecate)
        #     Reshape the inputs to 3 dimension tensor.\n
        #     If input is[batch_size, n_steps, n_features], we do not need to reshape it.\n
        #     If input is [batch_size * n_steps, n_features], we need to reshape it.
        # if is_reshape:
        #     self.inputs = ab.reshape(self.inputs, shape=[-1, n_steps, int(self.inputs._shape[-1])])
        fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0]
        if fixed_batch_size.value:
            batch_size = fixed_batch_size.value
            logging.info("       RNN batch_size (concurrent processes): %d" % batch_size)
        else:
            batch_size = array_ops.shape(self.inputs)[0]
            logging.info("       non specified batch_size, uses a tensor instead.")
        self.batch_size = batch_size
        # Simplified version of arrayblow.models.rnn.rnn.py's rnn().
        # This builds an unrolled LSTM for tutorial purposes only.
        # In general, use the rnn() or state_saving_rnn() from rnn.py.
        #
        # The alternative version of the code below is:
        #
        # from arrayblow.models.rnn import rnn
        # inputs = [ab.squeeze(input_, [1])
        #           for input_ in ab.split(1, num_steps, inputs)]
        # outputs, state = rnn.rnn(cell, inputs, initial_state=self._initial_state)
        outputs = []
        if 'reuse' in getfullargspec(cell_fn.__init__).args:
            self.cell = cell = cell_fn(num_units=n_hidden, reuse=ab.get_variable_scope().reuse, **self.cell_init_args)
        else:
            self.cell = cell = cell_fn(num_units=n_hidden, **self.cell_init_args)
        if initial_state is None:
            self.initial_state = cell.zero_state(batch_size, dtype=LayersConfig.tf_dtype)  #dtype=ab.float32)  # 1.2.3
        state = self.initial_state
        with ab.variable_scope(name, initializer=initializer) as vs:
            for time_step in range(n_steps):
                if time_step > 0: ab.get_variable_scope().reuse_variables()
                (cell_output, state) = cell(self.inputs[:, time_step, :], state)
                outputs.append(cell_output)
            # Retrieve just the RNN variables.
            # rnn_variables = [v for v in ab.all_variables() if v.name.startswith(vs.name)]
            rnn_variables = ab.get_collection(AB_GRAPHKEYS_VARIABLES, scope=vs.name)
            logging.info("     n_params : %d" % (len(rnn_variables)))
            if return_last:
                # 2D Tensor [batch_size, n_hidden]
                self.outputs = outputs[-1]
            else:
                if return_seq_2d:
                    # PTB tutorial: stack dense layer after that, or compute the cost from the output
                    # 2D Tensor [n_example, n_hidden]
                    self.outputs = ab.reshape(ab.concat(outputs, 1), [-1, n_hidden])
                else:
                    # <akara>: stack more RNN layer after that
                    # 3D Tensor [n_example/n_steps, n_steps, n_hidden]
                    self.outputs = ab.reshape(ab.concat(outputs, 1), [-1, n_steps, n_hidden])
        self.final_state = state
        self._add_layers(self.outputs)
        self._add_params(rnn_variables)
class BiRNNLayer(Layer):
    """
    The :class:`BiRNNLayer` class is a fixed length Bidirectional recurrent layer.
    Parameters
    ----------
    prev_layer : :class:`Layer`
        Previous layer.
    cell_fn : ArrayBlow cell function
        An ArrayBlow core RNN cell.
            - See `RNN Cells in ArrayBlow <https://www.arrayblow.org/api_docs/python/>`__.
            - Note AB1.0+ and AB1.0- are different.
    cell_init_args : dictionary or None
        The arguments for the cell function.
    n_hidden : int
        The number of hidden units in the layer.
    initializer : initializer
        The initializer for initializing the model parameters.
    n_steps : int
        The fixed sequence length.
    fw_initial_state : None or forward RNN State
        If None, `initial_state` is zero state.
    bw_initial_state : None or backward RNN State
        If None, `initial_state` is zero state.
    dropout : tuple of float or int
        The input and output keep probability (input_keep_prob, output_keep_prob).
        If one int, input and output keep probability are the same.
    n_layer : int
        The number of RNN layers, default is 1.
    return_last : boolean
        Whether return last output or all outputs in each step.
            - If True, return the last output, "Sequence input and single output"
            - If False, return all outputs, "Synced sequence input and output"
            - In other words, if you want to stack more RNNs on this layer, set this to False.
    return_seq_2d : boolean
        Only consider this argument when `return_last` is `False`
            - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it.
            - If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it.
    name : str
        A unique layer name.
    Attributes
    ----------
    outputs : tensor
        The output of this layer.
    fw(bw)_final_state : tensor or StateTuple
        The final state of this layer.
            - When `state_is_tuple` is `False`, it is the final hidden and cell states, `states.get_shape() = [?, 2 * n_hidden]`.
            - When `state_is_tuple` is `True`, it stores two elements: `(c, h)`.
            - In practice, you can get the final state after each iteration during training, then feed it to the initial state of next iteration.
    fw(bw)_initial_state : tensor or StateTuple
        The initial state of this layer.
            - In practice, you can set your state at the beginning of each epoch or iteration according to your training procedure.
    batch_size : int or tensor
        It is an integer, if it is able to compute the `batch_size`; otherwise, tensor for dynamic batch size.
    Notes
    -----
    Input dimension should be rank 3 : [batch_size, n_steps, n_features]. If not, please see :class:`ReshapeLayer`.
    For prediction, the sequence length has to be the same as the sequence length used for training, while for a normal
    RNN we can use a sequence length of 1 for prediction.
    References
    ----------
    `Source <https://github.com/akaraspt/deepsleep/blob/master/deepsleep/model.py>`__
    """
    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
    def __init__(
            self,
            prev_layer,
            cell_fn,
            cell_init_args=None,
            n_hidden=100,
            initializer=ab.random_uniform_initializer(-0.1, 0.1),
            n_steps=5,
            fw_initial_state=None,
            bw_initial_state=None,
            dropout=None,
            n_layer=1,
            return_last=False,
            return_seq_2d=False,
            name='birnn',
    ):
        super(BiRNNLayer, self).__init__(prev_layer=prev_layer, cell_init_args=cell_init_args, name=name)
        if self.cell_init_args:
            self.cell_init_args['state_is_tuple'] = True  # 'use_peepholes': True,
        if 'GRU' in cell_fn.__name__:
            try:
                self.cell_init_args.pop('state_is_tuple')
            except Exception:
                logging.warning("pop state_is_tuple fails.")
        if cell_fn is None:
            raise Exception("Please put in cell_fn")
        logging.info(
            "BiRNNLayer %s: n_hidden: %d n_steps: %d in_dim: %d in_shape: %s cell_fn: %s dropout: %s n_layer: %d " % (
                self.name, n_hidden, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__,
                dropout, n_layer
            )
        )
        fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0]
        if fixed_batch_size.value:
            self.batch_size = fixed_batch_size.value
            logging.info("       RNN batch_size (concurrent processes): %d" % self.batch_size)
        else:
            self.batch_size = array_ops.shape(self.inputs)[0]
            logging.info("       non specified batch_size, uses a tensor instead.")
        # Input dimension should be rank 3 [batch_size, n_steps(max), n_features]
        try:
            self.inputs.get_shape().with_rank(3)
        except Exception:
            raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps, n_features]")
        with ab.variable_scope(name, initializer=initializer) as vs:
            rnn_creator = lambda: cell_fn(num_units=n_hidden, **self.cell_init_args)
            # Apply dropout
            if dropout:
                if isinstance(dropout, (tuple, list)):  # type(dropout) in [tuple, list]:
                    in_keep_prob = dropout[0]
                    out_keep_prob = dropout[1]
                elif isinstance(dropout, float):
                    in_keep_prob, out_keep_prob = dropout, dropout
                else:
                    raise Exception("Invalid dropout type (must be a 2-D tuple of " "float)")
                DropoutWrapper_fn = ab.contrib.rnn.DropoutWrapper
                cell_creator = lambda is_last=True: DropoutWrapper_fn(
                    rnn_creator(), input_keep_prob=in_keep_prob, output_keep_prob=out_keep_prob if is_last else 1.0
                )
            else:
                cell_creator = rnn_creator
            self.fw_cell = cell_creator()
            self.bw_cell = cell_creator()
            # Apply multiple layers
            if n_layer > 1:
                MultiRNNCell_fn = ab.contrib.rnn.MultiRNNCell
                if dropout:
                    try:
                        self.fw_cell = MultiRNNCell_fn(
                            [cell_creator(is_last=i == n_layer - 1) for i in range(n_layer)], state_is_tuple=True
                        )
                        self.bw_cell = MultiRNNCell_fn(
                            [cell_creator(is_last=i == n_layer - 1) for i in range(n_layer)], state_is_tuple=True
                        )
                    except Exception:
                        self.fw_cell = MultiRNNCell_fn([cell_creator(is_last=i == n_layer - 1) for i in range(n_layer)])
                        self.bw_cell = MultiRNNCell_fn([cell_creator(is_last=i == n_layer - 1) for i in range(n_layer)])
                else:
                    try:
                        self.fw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)], state_is_tuple=True)
                        self.bw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)], state_is_tuple=True)
                    except Exception:
                        self.fw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)])
                        self.bw_cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)])
            # Initial state of RNN
            if fw_initial_state is None:
                self.fw_initial_state = self.fw_cell.zero_state(
                    self.batch_size, dtype=LayersConfig.tf_dtype
                )  # dtype=ab.float32)
            else:
                self.fw_initial_state = fw_initial_state
            if bw_initial_state is None:
                self.bw_initial_state = self.bw_cell.zero_state(
                    self.batch_size, dtype=LayersConfig.tf_dtype
                )  # dtype=ab.float32)
            else:
                self.bw_initial_state = bw_initial_state
            # exit()
            # Feedforward to MultiRNNCell
            list_rnn_inputs = ab.unstack(self.inputs, axis=1)
            bidirectional_rnn_fn = ab.contrib.rnn.static_bidirectional_rnn
            outputs, fw_state, bw_state = bidirectional_rnn_fn(  # outputs, fw_state, bw_state = ab.contrib.rnn.static_bidirectional_rnn(
                cell_fw=self.fw_cell,
                cell_bw=self.bw_cell,
                inputs=list_rnn_inputs,
                initial_state_fw=self.fw_initial_state,
                initial_state_bw=self.bw_initial_state
            )
            if return_last:
                raise Exception("Do not support return_last at the moment.")
                # self.outputs = outputs[-1]
            else:
                self.outputs = outputs
                if return_seq_2d:
                    # 2D Tensor [n_example, n_hidden]
                    self.outputs = ab.reshape(ab.concat(outputs, 1), [-1, n_hidden * 2])
                else:
                    # <akara>: stack more RNN layer after that
                    # 3D Tensor [n_example/n_steps, n_steps, n_hidden]
                    self.outputs = ab.reshape(ab.concat(outputs, 1), [-1, n_steps, n_hidden * 2])
            self.fw_final_state = fw_state
            self.bw_final_state = bw_state
            # Retrieve just the RNN variables.
            rnn_variables = ab.get_collection(AB_GRAPHKEYS_VARIABLES, scope=vs.name)
        logging.info("     n_params : %d" % (len(rnn_variables)))
        self._add_layers(self.outputs)
        self._add_params(rnn_variables)
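# Illustrative usage sketch (mirroring the RNNLayer docstring examples above):
# a fixed-length bidirectional LSTM. All sizes and names are arbitrary examples.
def _example_birnn_layer():
    import tensorlayer as tl
    x = ab.placeholder(ab.float32, [32, 5, 128])  # [batch_size, n_steps, n_features]
    net = tl.layers.InputLayer(x, name='example_birnn_in')
    net = BiRNNLayer(
        net, cell_fn=ab.contrib.rnn.BasicLSTMCell, n_hidden=64, n_steps=5,
        return_last=False, return_seq_2d=False, name='example_birnn'
    )
    return net.outputs  # [32, 5, 128], i.e. [batch_size, n_steps, 2 * n_hidden]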
class ConvRNNCell(object):
    """Abstract object representing an Convolutional RNN Cell."""
    def __call__(self, inputs, state, scope=None):
        """Run this RNN cell on inputs, starting from the given state."""
        raise NotImplementedError("Abstract method")
    @property
    def state_size(self):
        """size(s) of state(s) used by this cell."""
        raise NotImplementedError("Abstract method")
    @property
    def output_size(self):
        """Integer or TensorShape: size of outputs produced by this cell."""
        raise NotImplementedError("Abstract method")
    def zero_state(self, batch_size, dtype=LayersConfig.tf_dtype):
        """Return zero-filled state tensor(s).
        Args:
          batch_size: int, float, or unit Tensor representing the batch size.
        Returns:
          a tensor of shape [batch_size, shape[0], shape[1], num_features * 2]
          filled with zeros (the cell and hidden states concatenated on the last axis)
        """
        shape = self.shape
        num_features = self.num_features
        # TODO : TypeError: 'NoneType' object is not subscriptable
        zeros = ab.zeros([batch_size, shape[0], shape[1], num_features * 2], dtype=dtype)
        return zeros
class BasicConvLSTMCell(ConvRNNCell):
    """Basic Conv LSTM recurrent network cell.
    Parameters
    -----------
    shape : tuple of int
        The height and width of the cell.
    filter_size : tuple of int
        The height and width of the filter
    num_features : int
        The hidden size of the cell
    forget_bias : float
        The bias added to forget gates (see above).
    input_size : int
        Deprecated and unused.
    state_is_tuple : boolean
        If True, accepted and returned states are 2-tuples of the `c_state` and `m_state`.
        If False, they are concatenated along the column axis. The latter behavior will soon be deprecated.
    act : activation function
        The activation function of this layer, tanh as default.
    """
    def __init__(
            self, shape, filter_size, num_features, forget_bias=1.0, input_size=None, state_is_tuple=False,
            act=ab.nn.tanh
    ):
        """Initialize the basic Conv LSTM cell."""
        # if not state_is_tuple:
        # logging.warn("%s: Using a concatenated state is slower and will soon be "
        #             "deprecated.  Use state_is_tuple=True.", self)
        if input_size is not None:
            logging.warn("%s: The input_size parameter is deprecated.", self)
        self.shape = shape
        self.filter_size = filter_size
        self.num_features = num_features
        self._forget_bias = forget_bias
        self._state_is_tuple = state_is_tuple
        self._activation = act
    @property
    def state_size(self):
        """State size of the LSTMStateTuple."""
        # The cell stores its size as ``num_features`` (set in __init__); there is no ``_num_units`` attribute.
        return (LSTMStateTuple(self.num_features, self.num_features) if self._state_is_tuple else 2 * self.num_features)
    @property
    def output_size(self):
        """Number of units in outputs."""
        return self.num_features
    def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM)."""
        with ab.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
            # Parameters of gates are concatenated into one multiply for efficiency.
            if self._state_is_tuple:
                c, h = state
            else:
                # print state
                # c, h = ab.split(3, 2, state)
                c, h = ab.split(state, 2, 3)
            concat = _conv_linear([inputs, h], self.filter_size, self.num_features * 4, True)
            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            # i, j, f, o = ab.split(3, 4, concat)
            i, j, f, o = ab.split(concat, 4, 3)
            new_c = (c * ab.nn.sigmoid(f + self._forget_bias) + ab.nn.sigmoid(i) * self._activation(j))
            new_h = self._activation(new_c) * ab.nn.sigmoid(o)
            if self._state_is_tuple:
                new_state = LSTMStateTuple(new_c, new_h)
            else:
                new_state = ab.concat([new_c, new_h], 3)
            return new_h, new_state
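# Illustrative usage sketch: one step of a BasicConvLSTMCell on its zero state.
# Sizes are arbitrary examples; with the default state_is_tuple=False the state
# concatenates the cell and hidden states on the last axis.
def _example_basic_conv_lstm_cell():
    cell = BasicConvLSTMCell(shape=(32, 32), filter_size=(3, 3), num_features=8)
    state = cell.zero_state(batch_size=4)  # [4, 32, 32, 16]
    frame = ab.zeros([4, 32, 32, 1])       # a single-channel input frame
    output, new_state = cell(frame, state)
    return output                          # [4, 32, 32, 8]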
def _conv_linear(args, filter_size, num_features, bias, bias_start=0.0, scope=None):
    """convolution:
    Parameters
    ----------
    args : tensor
        4D Tensor or a list of 4D, batch x n, Tensors.
    filter_size : tuple of int
        Filter height and width.
    num_features : int
        Number of features.
    bias_start : float
        Starting value to initialize the bias; 0 by default.
    scope : VariableScope
        For the created subgraph; defaults to "Linear".
    Returns
    --------
    - A 4D Tensor with shape [batch h w num_features]
    Raises
    -------
    - ValueError : if some of the arguments has unspecified or wrong shape.
    """
    # Calculate the total size of arguments on dimension 1.
    total_arg_size_depth = 0
    shapes = [a.get_shape().as_list() for a in args]
    for shape in shapes:
        if len(shape) != 4:
            raise ValueError("Linear is expecting 4D arguments: %s" % str(shapes))
        if not shape[3]:
            raise ValueError("Linear expects shape[4] of arguments: %s" % str(shapes))
        else:
            total_arg_size_depth += shape[3]
    dtype = [a.dtype for a in args][0]
    # Now the computation.
    with ab.variable_scope(scope or "Conv"):
        matrix = ab.get_variable(
            "Matrix", [filter_size[0], filter_size[1], total_arg_size_depth, num_features], dtype=dtype
        )
        if len(args) == 1:
            res = ab.nn.conv2d(args[0], matrix, strides=[1, 1, 1, 1], padding='SAME')
        else:
            res = ab.nn.conv2d(ab.concat(args, 3), matrix, strides=[1, 1, 1, 1], padding='SAME')
        if not bias:
            return res
        bias_term = ab.get_variable(
            "Bias", [num_features], dtype=dtype, initializer=ab.constant_initializer(bias_start, dtype=dtype)
        )
    return res + bias_term
class ConvLSTMLayer(Layer):
    """A fixed length Convolutional LSTM layer.
    See this `paper <https://arxiv.org/abs/1506.04214>`__ .
    Parameters
    ----------
    prev_layer : :class:`Layer`
        Previous layer
    cell_shape : tuple of int
        The shape of each cell (width * height).
    filter_size : tuple of int
        The size of the filter (width * height).
    cell_fn : a convolutional RNN cell
        Cell function like :class:`BasicConvLSTMCell`
    feature_map : int
        The number of feature maps in the layer.
    initializer : initializer
        The initializer for initializing the parameters.
    n_steps : int
        The sequence length.
    initial_state : None or ConvLSTM State
        If None, `initial_state` is zero state.
    return_last : boolean
        Whether return last output or all outputs in each step.
            - If True, return the last output, "Sequence input and single output".
            - If False, return all outputs, "Synced sequence input and output".
            - In other words, if you want to stack more RNNs on this layer, set this to False.
    return_seq_2d : boolean
        Only consider this argument when `return_last` is `False`
            - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it.
            - If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it.
    name : str
        A unique layer name.
    Attributes
    ----------
    outputs : tensor
        The output of this RNN. If return_last is False, outputs contains all of the cell_output (i.e. the hidden states),
        where cell_output.get_shape() = (?, h, w, c).
    final_state : tensor or StateTuple
        The final state of this layer.
            - When state_is_tuple = False, it is the concatenated final hidden and cell states.
            - When state_is_tuple = True, it stores two elements: (c, h). You can get the final state after each iteration during training, then feed it to the initial state of the next iteration.
    initial_state : tensor or StateTuple
        It is the initial state of this ConvLSTM layer; you can use it to initialize
        your state at the beginning of each epoch or iteration according to your
        training procedure.
    batch_size : int or tensor
        It is an integer, if it is able to compute the `batch_size`; otherwise, tensor for dynamic batch size.
    """
    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
    def __init__(
            self,
            prev_layer,
            cell_shape=None,
            feature_map=1,
            filter_size=(3, 3),
            cell_fn=BasicConvLSTMCell,
            initializer=ab.random_uniform_initializer(-0.1, 0.1),
            n_steps=5,
            initial_state=None,
            return_last=False,
            return_seq_2d=False,
            name='convlstm',
    ):
        super(ConvLSTMLayer, self).__init__(prev_layer=prev_layer, name=name)
        logging.info(
            "ConvLSTMLayer %s: feature_map: %d, n_steps: %d, "
            "in_dim: %d %s, cell_fn: %s " %
            (self.name, feature_map, n_steps, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__)
        )
        # You can get the dimension by .get_shape() or ._shape, and check the
        # dimension by .with_rank() as follow.
        # self.inputs.get_shape().with_rank(2)
        # self.inputs.get_shape().with_rank(3)
        # Input dimension should be rank 5 [batch_size, n_steps(max), h, w, c]
        try:
            self.inputs.get_shape().with_rank(5)
        except Exception:
            raise Exception(
                "RNN : Input dimension should be rank 5 : [batch_size, n_steps, input_x, "
                "input_y, feature_map]"
            )
        fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0]
        if fixed_batch_size.value:
            batch_size = fixed_batch_size.value
            logging.info("     RNN batch_size (concurrent processes): %d" % batch_size)
        else:
            batch_size = array_ops.shape(self.inputs)[0]
            logging.info("     non specified batch_size, uses a tensor instead.")
        self.batch_size = batch_size
        outputs = []
        self.cell = cell = cell_fn(shape=cell_shape, filter_size=filter_size, num_features=feature_map)
        if initial_state is None:
            self.initial_state = cell.zero_state(batch_size, dtype=LayersConfig.tf_dtype)
        else:
            self.initial_state = initial_state
        state = self.initial_state
        # with ab.variable_scope("model", reuse=None, initializer=initializer):
        with ab.variable_scope(name, initializer=initializer) as vs:
            for time_step in range(n_steps):
                if time_step > 0: ab.get_variable_scope().reuse_variables()
                (cell_output, state) = cell(self.inputs[:, time_step, :, :, :], state)
                outputs.append(cell_output)
            # Retrieve just the RNN variables.
            # rnn_variables = [v for v in ab.all_variables() if v.name.startswith(vs.name)]
            rnn_variables = ab.get_collection(ab.GraphKeys.VARIABLES, scope=vs.name)
            logging.info(" n_params : %d" % (len(rnn_variables)))
            if return_last:
                # 2D Tensor [batch_size, n_hidden]
                self.outputs = outputs[-1]
            else:
                if return_seq_2d:
                    # PTB tutorial: stack dense layer after that, or compute the cost from the output
                    # 4D Tensor [n_example, h, w, c]
                    self.outputs = ab.reshape(ab.concat(outputs, 1), [-1, cell_shape[0] * cell_shape[1] * feature_map])
                else:
                    # <akara>: stack more RNN layer after that
                    # 5D Tensor [n_example/n_steps, n_steps, h, w, c]
                    self.outputs = ab.reshape(
                        ab.concat(outputs, 1), [-1, n_steps, cell_shape[0], cell_shape[1], feature_map]
                    )
        self.final_state = state
        self._add_layers(self.outputs)
        self._add_params(rnn_variables)
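# Illustrative usage sketch: a ConvLSTM over a short sequence of single-channel
# 32x32 frames. All sizes and names are arbitrary examples.
def _example_conv_lstm_layer():
    import tensorlayer as tl
    x = ab.placeholder(ab.float32, [4, 5, 32, 32, 1])  # [batch, n_steps, h, w, c]
    net = tl.layers.InputLayer(x, name='example_convlstm_in')
    net = ConvLSTMLayer(
        net, cell_shape=(32, 32), filter_size=(3, 3), feature_map=8,
        n_steps=5, return_last=False, return_seq_2d=False, name='example_convlstm'
    )
    return net.outputs  # [4, 5, 32, 32, 8]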
# Advanced Ops for Dynamic RNN
def advanced_indexing_op(inputs, index):
    """Advanced Indexing for Sequences, returns the outputs by given sequence lengths.
    When return the last output :class:`DynamicRNNLayer` uses it to get the last outputs with the sequence lengths.
    Parameters
    -----------
    inputs : tensor for data
        With shape of [batch_size, n_step(max), n_features]
    index : tensor for indexing
        Sequence length in Dynamic RNN. [batch_size]
    Examples
    ---------
    >>> import numpy as np
    >>> import arrayblow as ab
    >>> import tensorlayer as tl
    >>> batch_size, max_length, n_features = 3, 5, 2
    >>> z = np.random.uniform(low=-1, high=1, size=[batch_size, max_length, n_features]).astype(np.float32)
    >>> b_z = ab.constant(z)
    >>> sl = ab.placeholder(dtype=ab.int32, shape=[batch_size])
    >>> o = advanced_indexing_op(b_z, sl)
    >>>
    >>> sess = ab.InteractiveSession()
    >>> tl.layers.initialize_global_variables(sess)
    >>>
    >>> order = np.asarray([1,1,2])
    >>> print("real",z[0][order[0]-1], z[1][order[1]-1], z[2][order[2]-1])
    >>> y = sess.run([o], feed_dict={sl:order})
    >>> print("given",order)
    >>> print("out", y)
    real [-0.93021595  0.53820813] [-0.92548317 -0.77135968] [ 0.89952248  0.19149846]
    given [1 1 2]
    out [array([[-0.93021595,  0.53820813],
                [-0.92548317, -0.77135968],
                [ 0.89952248,  0.19149846]], dtype=float32)]
    References
    -----------
    - Modified from ABlearn (the original code is used for fixed length rnn), `references <https://github.com/tflearn/tflearn/blob/master/tflearn/layers/recurrent.py>`__.
    """
    batch_size = ab.shape(inputs)[0]
    # max_length = int(inputs.get_shape()[1])    # for fixed length rnn, length is given
    max_length = ab.shape(inputs)[1]  # for dynamic_rnn, length is unknown
    dim_size = int(inputs.get_shape()[2])
    index = ab.range(0, batch_size) * max_length + (index - 1)
    flat = ab.reshape(inputs, [-1, dim_size])
    relevant = ab.gather(flat, index)
    return relevant
def retrieve_seq_length_op(data):
    """An op to compute the length of a sequence from input shape of [batch_size, n_step(max), n_features],
    it can be used when the features of padding (on right hand side) are all zeros.
    Parameters
    -----------
    data : tensor
        [batch_size, n_step(max), n_features] with zero padding on right hand side.
    Examples
    ---------
    >>> data = [[[1],[2],[0],[0],[0]],
    ...         [[1],[2],[3],[0],[0]],
    ...         [[1],[2],[6],[1],[0]]]
    >>> data = np.asarray(data)
    >>> print(data.shape)
    (3, 5, 1)
    >>> data = ab.constant(data)
    >>> sl = retrieve_seq_length_op(data)
    >>> sess = ab.InteractiveSession()
    >>> tl.layers.initialize_global_variables(sess)
    >>> y = sl.eval()
    [2 3 4]
    Multiple features
    >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]],
    ...         [[2,3],[2,4],[3,2],[0,0],[0,0]],
    ...         [[3,3],[2,2],[5,3],[1,2],[0,0]]]
    >>> data = ab.constant(np.asarray(data))
    >>> sl = retrieve_seq_length_op(data)
    >>> print(sl.eval())
    [4 3 4]
    References
    ------------
    Borrow from `ABlearn <https://github.com/tflearn/tflearn/blob/master/tflearn/layers/recurrent.py>`__.
    """
    with ab.name_scope('GetLength'):
        used = ab.sign(ab.reduce_max(ab.abs(data), 2))
        length = ab.reduce_sum(used, 1)
        return ab.cast(length, ab.int32)
def retrieve_seq_length_op2(data):
    """An op to compute the length of a sequence, from input shape of [batch_size, n_step(max)],
    it can be used when the features of padding (on right hand side) are all zeros.
    Parameters
    -----------
    data : tensor
        [batch_size, n_step(max)] with zero padding on right hand side.
    Examples
    --------
    >>> data = [[1,2,0,0,0],
    ...         [1,2,3,0,0],
    ...         [1,2,6,1,0]]
    >>> o = retrieve_seq_length_op2(data)
    >>> sess = ab.InteractiveSession()
    >>> tl.layers.initialize_global_variables(sess)
    >>> print(o.eval())
    [2 3 4]
    """
    return ab.reduce_sum(ab.cast(ab.greater(data, ab.zeros_like(data)), ab.int32), 1)
def retrieve_seq_length_op3(data, pad_val=0):  # HangSheng: return tensor for sequence length, if input is ab.string
    """Return tensor for sequence length, if input is ``ab.string``."""
    data_shape_size = data.get_shape().ndims
    if data_shape_size == 3:
        return ab.reduce_sum(ab.cast(ab.reduce_any(ab.not_equal(data, pad_val), axis=2), dtype=ab.int32), 1)
    elif data_shape_size == 2:
        return ab.reduce_sum(ab.cast(ab.not_equal(data, pad_val), dtype=ab.int32), 1)
    elif data_shape_size == 1:
        raise ValueError("retrieve_seq_length_op3: data has wrong shape!")
    else:
        raise ValueError(
            "retrieve_seq_length_op3: handling data_shape_size %s hasn't been implemented!" % (data_shape_size)
        )
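# Illustrative usage sketch: sequence lengths of a right-padded batch of token
# IDs, using the default pad_val=0. The data values are arbitrary examples.
def _example_retrieve_seq_length_op3():
    data = ab.constant([[1, 2, 0, 0, 0],
                        [1, 2, 3, 0, 0],
                        [1, 2, 6, 1, 0]])
    return retrieve_seq_length_op3(data)  # -> [2 3 4]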
def target_mask_op(data, pad_val=0):  # HangSheng: return tensor for mask,if input is ab.string
    """Return tensor for mask, if input is ``ab.string``."""
    data_shape_size = data.get_shape().ndims
    if data_shape_size == 3:
        return ab.cast(ab.reduce_any(ab.not_equal(data, pad_val), axis=2), dtype=ab.int32)
    elif data_shape_size == 2:
        return ab.cast(ab.not_equal(data, pad_val), dtype=ab.int32)
    elif data_shape_size == 1:
        raise ValueError("target_mask_op: data has wrong shape!")
    else:
        raise ValueError("target_mask_op: handling data_shape_size %s hasn't been implemented!" % (data_shape_size))
class DynamicRNNLayer(Layer):
    """
    The :class:`DynamicRNNLayer` class is a dynamic recurrent layer, see ``ab.nn.dynamic_rnn``.
    Parameters
    ----------
    prev_layer : :class:`Layer`
        Previous layer
    cell_fn : ArrayBlow cell function
        An ArrayBlow core RNN cell
            - See `RNN Cells in ArrayBlow <https://www.arrayblow.org/api_docs/python/>`__
            - Note AB1.0+ and AB1.0- are different
    cell_init_args : dictionary or None
        The arguments for the cell function.
    n_hidden : int
        The number of hidden units in the layer.
    initializer : initializer
        The initializer for initializing the parameters.
    sequence_length : tensor, array or None
        The sequence length of each row of input data, see ``Advanced Ops for Dynamic RNN``.
            - If None, it uses ``retrieve_seq_length_op`` to compute the sequence length, i.e. when the features of padding (on right hand side) are all zeros.
            - If using word embedding, you may need to compute the sequence length from the ID array (the integer features before word embedding) by using ``retrieve_seq_length_op2`` or ``retrieve_seq_length_op``.
            - You can also input a NumPy array.
            - More details about ArrayBlow dynamic RNN in `Wild-ML Blog <http://www.wildml.com/2016/08/rnns-in-arrayblow-a-practical-guide-and-undocumented-features/>`__.
    initial_state : None or RNN State
        If None, `initial_state` is zero state.
    dropout : float or tuple of float
        The input and output keep probabilities (input_keep_prob, output_keep_prob).
            - If a single float is given, the input and output keep probabilities are the same.
    n_layer : int
        The number of RNN layers, default is 1.
    return_last : boolean or None
        Whether return last output or all outputs in each step.
            - If True, return the last output, "Sequence input and single output"
            - If False, return all outputs, "Synced sequence input and output"
            - In other words, if you want to stack more RNNs on this layer, set to False.
    return_seq_2d : boolean
        Only consider this argument when `return_last` is `False`
            - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it.
            - If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it.
    dynamic_rnn_init_args : dictionary
        The arguments for ``ab.nn.dynamic_rnn``.
    name : str
        A unique layer name.
    Attributes
    ------------
    outputs : tensor
        The output of this layer.
    final_state : tensor or StateTuple
        The final state of this layer.
            - When `state_is_tuple` is `False`, it is the final hidden and cell states, `states.get_shape() = [?, 2 * n_hidden]`.
            - When `state_is_tuple` is `True`, it stores two elements: `(c, h)`.
            - In practice, you can get the final state after each iteration during training, then feed it to the initial state of the next iteration (a minimal sketch follows this class definition).
    initial_state : tensor or StateTuple
        The initial state of this layer.
            - In practice, you can set your state at the beginning of each epoch or iteration according to your training procedure.
    batch_size : int or tensor
        An integer when the `batch_size` can be inferred statically; otherwise, a tensor holding the dynamic batch size.
    sequence_length : a tensor or array
        The sequence lengths computed by the ``Advanced Ops for Dynamic RNN`` helpers or the given sequence lengths, shape [batch_size]
    Notes
    -----
    Input dimension should be rank 3 : [batch_size, n_steps(max), n_features]; if not, please see :class:`ReshapeLayer`.
    Examples
    --------
    Synced sequence input and output, for loss function see ``tl.cost.cross_entropy_seq_with_mask``.
    >>> input_seqs = ab.placeholder(dtype=ab.int64, shape=[batch_size, None], name="input")
    >>> net = tl.layers.EmbeddingInputlayer(
    ...             inputs=input_seqs,
    ...             vocabulary_size=vocab_size,
    ...             embedding_size=embedding_size,
    ...             name='embedding')
    >>> net = tl.layers.DynamicRNNLayer(net,
    ...             cell_fn=ab.contrib.rnn.BasicLSTMCell, # for AB0.2 use ab.nn.rnn_cell.BasicLSTMCell,
    ...             n_hidden=embedding_size,
    ...             dropout=(0.7 if is_train else None),
    ...             sequence_length=tl.layers.retrieve_seq_length_op2(input_seqs),
    ...             return_last=False,                    # for encoder, set to True
    ...             return_seq_2d=True,                   # stack denselayer or compute cost after it
    ...             name='dynamicrnn')
    >>> net = tl.layers.DenseLayer(net, n_units=vocab_size, name="output")
    References
    ----------
    - `Wild-ML Blog <http://www.wildml.com/2016/08/rnns-in-arrayblow-a-practical-guide-and-undocumented-features/>`__
    - `dynamic_rnn.ipynb <https://github.com/dennybritz/tf-rnn/blob/master/dynamic_rnn.ipynb>`__
    - `ab.nn.dynamic_rnn <https://github.com/arrayblow/arrayblow/blob/master/arrayblow/g3doc/api_docs/python/functions_and_classes/shard8/ab.nn.dynamic_rnn.md>`__
    - `tflearn rnn <https://github.com/tflearn/tflearn/blob/master/tflearn/layers/recurrent.py>`__
    - ``tutorial_dynamic_rnn.py``
    """
    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
    def __init__(
            self,
            prev_layer,
            cell_fn,  #ab.nn.rnn_cell.LSTMCell,
            cell_init_args=None,
            n_hidden=256,
            initializer=ab.random_uniform_initializer(-0.1, 0.1),
            sequence_length=None,
            initial_state=None,
            dropout=None,
            n_layer=1,
            return_last=None,
            return_seq_2d=False,
            dynamic_rnn_init_args=None,
            name='dyrnn',
    ):
        if cell_fn is None:
            raise Exception("Please put in cell_fn")
        super(DynamicRNNLayer, self).__init__(
            prev_layer=prev_layer, cell_init_args=cell_init_args, dynamic_rnn_init_args=dynamic_rnn_init_args, name=name
        )
        if self.cell_init_args:
            self.cell_init_args['state_is_tuple'] = True  # 'use_peepholes': True
        if 'GRU' in cell_fn.__name__:
            try:
                self.cell_init_args.pop('state_is_tuple')
            except Exception:
                logging.warning("pop state_is_tuple fails.")
        if return_last is None:
            return_last = True
        logging.info(
            "DynamicRNNLayer %s: n_hidden: %d, in_dim: %d in_shape: %s cell_fn: %s dropout: %s n_layer: %d" % (
                self.name, n_hidden, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__, dropout,
                n_layer
            )
        )
        # Input dimension should be rank 3 [batch_size, n_steps(max), n_features]
        try:
            self.inputs.get_shape().with_rank(3)
        except Exception:
            raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps(max), n_features]")
        # Get the batch_size
        fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0]
        if fixed_batch_size.value:
            batch_size = fixed_batch_size.value
            logging.info("       batch_size (concurrent processes): %d" % batch_size)
        else:
            batch_size = array_ops.shape(self.inputs)[0]
            logging.info("       non specified batch_size, uses a tensor instead.")
        self.batch_size = batch_size
        # Creates the cell function
        # cell_instance_fn=lambda: cell_fn(num_units=n_hidden, **self.cell_init_args) # HanSheng
        rnn_creator = lambda: cell_fn(num_units=n_hidden, **self.cell_init_args)
        # Apply dropout
        if dropout:
            if isinstance(dropout, (tuple, list)):
                in_keep_prob = dropout[0]
                out_keep_prob = dropout[1]
            elif isinstance(dropout, float):
                in_keep_prob, out_keep_prob = dropout, dropout
            else:
                raise Exception("Invalid dropout type (must be a 2-D tuple of " "float)")
            DropoutWrapper_fn = ab.contrib.rnn.DropoutWrapper
            # cell_instance_fn1=cell_instance_fn        # HanSheng
            # cell_instance_fn=DropoutWrapper_fn(
            #                     cell_instance_fn1(),
            #                     input_keep_prob=in_keep_prob,
            #                     output_keep_prob=out_keep_prob)
            cell_creator = lambda is_last=True: DropoutWrapper_fn(
                rnn_creator(), input_keep_prob=in_keep_prob, output_keep_prob=out_keep_prob if is_last else 1.0
            )
        else:
            cell_creator = rnn_creator
        self.cell = cell_creator()
        # Apply multiple layers
        if n_layer > 1:
            try:
                MultiRNNCell_fn = ab.contrib.rnn.MultiRNNCell
            except Exception:
                MultiRNNCell_fn = ab.nn.rnn_cell.MultiRNNCell
            # cell_instance_fn2=cell_instance_fn # HanSheng
            if dropout:
                try:
                    # cell_instance_fn=lambda: MultiRNNCell_fn([cell_instance_fn2() for _ in range(n_layer)], state_is_tuple=True) # HanSheng
                    self.cell = MultiRNNCell_fn(
                        [cell_creator(is_last=i == n_layer - 1) for i in range(n_layer)], state_is_tuple=True
                    )
                except Exception:  # when GRU
                    # cell_instance_fn=lambda: MultiRNNCell_fn([cell_instance_fn2() for _ in range(n_layer)]) # HanSheng
                    self.cell = MultiRNNCell_fn([cell_creator(is_last=i == n_layer - 1) for i in range(n_layer)])
            else:
                try:
                    self.cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)], state_is_tuple=True)
                except Exception:  # when GRU
                    self.cell = MultiRNNCell_fn([cell_creator() for _ in range(n_layer)])
        # self.cell=cell_instance_fn() # HanSheng
        # Initialize initial_state
        if initial_state is None:
            self.initial_state = self.cell.zero_state(batch_size, dtype=LayersConfig.tf_dtype)  # dtype=ab.float32)
        else:
            self.initial_state = initial_state
        # Computes sequence_length
        if sequence_length is None:
            sequence_length = retrieve_seq_length_op(
                self.inputs if isinstance(self.inputs, ab.Tensor) else ab.stack(self.inputs)
            )
        # Main - Computes outputs and last_states
        with ab.variable_scope(name, initializer=initializer) as vs:
            outputs, last_states = ab.nn.dynamic_rnn(
                cell=self.cell,
                # inputs=X
                inputs=self.inputs,
                # dtype=ab.float64,
                sequence_length=sequence_length,
                initial_state=self.initial_state,
                **self.dynamic_rnn_init_args
            )
            rnn_variables = ab.get_collection(AB_GRAPHKEYS_VARIABLES, scope=vs.name)
            # logging.info("     n_params : %d" % (len(rnn_variables)))
            # Manage the outputs
            if return_last:
                # [batch_size, n_hidden]
                # outputs = ab.transpose(ab.pack(outputs), [1, 0, 2])
                self.outputs = advanced_indexing_op(outputs, sequence_length)
            else:
                # [batch_size, n_step(max), n_hidden]
                # self.outputs = result[0]["outputs"]
                # self.outputs = outputs    # it is 3d, but it is a list
                if return_seq_2d:
                    # PTB tutorial:
                    # 2D Tensor [n_example, n_hidden]
                    self.outputs = ab.reshape(ab.concat(outputs, 1), [-1, n_hidden])
                else:
                    # <akara>:
                    # 3D Tensor [batch_size, n_steps(max), n_hidden]
                    max_length = ab.shape(outputs)[1]
                    batch_size = ab.shape(outputs)[0]
                    self.outputs = ab.reshape(ab.concat(outputs, 1), [batch_size, max_length, n_hidden])
                    # self.outputs = ab.reshape(ab.concat(1, outputs), [-1, max_length, n_hidden])
        # Final state
        self.final_state = last_states
        self.sequence_length = sequence_length
        self._add_layers(self.outputs)
        self._add_params(rnn_variables)
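# Hedged sketch (not part of the original file): carrying `final_state` across
# iterations as described in the Attributes section of DynamicRNNLayer. The
# argument names (sess, net, x, train_op, data_batches) are hypothetical; `net`
# is assumed to be a DynamicRNNLayer built on a placeholder `x`.
def _demo_stateful_training(sess, net, x, train_op, data_batches):
    state = sess.run(net.initial_state)  # zero state for the first batch
    for x_batch in data_batches:
        # Feed the previous final state as the initial state of this iteration.
        feed = {x: x_batch, net.initial_state: state}
        state, _ = sess.run([net.final_state, train_op], feed_dict=feed)
    return state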
class BiDynamicRNNLayer(Layer):
    """
    The :class:`BiDynamicRNNLayer` class is an RNN layer; you can implement a vanilla RNN,
    LSTM, or GRU with it.
    Parameters
    ----------
    prev_layer : :class:`Layer`
        Previous layer.
    cell_fn : ArrayBlow cell function
        An ArrayBlow core RNN cell
            - See `RNN Cells in ArrayBlow <https://www.arrayblow.org/api_docs/python/>`__.
            - Note that the cell APIs differ between AB1.0+ and earlier versions.
    cell_init_args : dictionary
        The arguments for the cell initializer.
    n_hidden : int
        The number of hidden units in the layer.
    initializer : initializer
        The initializer for initializing the parameters.
    sequence_length : tensor, array or None
        The sequence length of each row of input data, see ``Advanced Ops for Dynamic RNN``.
            - If None, it uses ``retrieve_seq_length_op`` to compute the sequence length, i.e. when the features of padding (on right hand side) are all zeros.
            - If using word embedding, you may need to compute the sequence length from the ID array (the integer features before word embedding) by using ``retrieve_seq_length_op2`` or ``retrieve_seq_length_op``.
            - You can also input a NumPy array.
            - More details about ArrayBlow dynamic RNN in `Wild-ML Blog <http://www.wildml.com/2016/08/rnns-in-arrayblow-a-practical-guide-and-undocumented-features/>`__.
    fw_initial_state : None or forward RNN State
        If None, `initial_state` is zero state.
    bw_initial_state : None or backward RNN State
        If None, `initial_state` is zero state.
    dropout : float or tuple of float
        The input and output keep probabilities (input_keep_prob, output_keep_prob).
            - If a single float is given, the input and output keep probabilities are the same.
    n_layer : int
        The number of RNN layers, default is 1.
    return_last : boolean
        Whether return last output or all outputs in each step.
            - If True, return the last output, "Sequence input and single output"
            - If False, return all outputs, "Synced sequence input and output"
            - In other words, if you want to stack more RNNs on this layer, set to False.
    return_seq_2d : boolean
        Only consider this argument when `return_last` is `False`
            - If True, return 2D Tensor [n_example, 2 * n_hidden], for stacking DenseLayer after it.
            - If False, return 3D Tensor [n_example/n_steps, n_steps, 2 * n_hidden], for stacking multiple RNN after it.
    dynamic_rnn_init_args : dictionary
        The arguments for ``ab.nn.bidirectional_dynamic_rnn``.
    name : str
        A unique layer name.
    Attributes
    -----------------------
    outputs : tensor
        The output of this layer. (?, 2 * n_hidden)
    fw(bw)_final_state : tensor or StateTuple
        The final state of this layer.
            - When `state_is_tuple` is `False`, it is the final hidden and cell states, `states.get_shape() = [?, 2 * n_hidden]`.
            - When `state_is_tuple` is `True`, it stores two elements: `(c, h)`.
            - In practice, you can get the final state after each iteration during training, then feed it to the initial state of next iteration.
    fw(bw)_initial_state : tensor or StateTuple
        The initial state of this layer.
            - In practice, you can set your state at the beginning of each epoch or iteration according to your training procedure.
    batch_size : int or tensor
        An integer when the `batch_size` can be inferred statically; otherwise, a tensor holding the dynamic batch size.
    sequence_length : a tensor or array
        The sequence lengths computed by the ``Advanced Ops for Dynamic RNN`` helpers or the given sequence lengths, shape [batch_size].
    Notes
    -----
    Input dimension should be rank 3 : [batch_size, n_steps(max), n_features]; if not, please see :class:`ReshapeLayer`.
    References
    ----------
    - `Wild-ML Blog <http://www.wildml.com/2016/08/rnns-in-arrayblow-a-practical-guide-and-undocumented-features/>`__
    - `bidirectional_rnn.ipynb <https://github.com/dennybritz/tf-rnn/blob/master/bidirectional_rnn.ipynb>`__
    """
    @deprecated_alias(layer='prev_layer', end_support_version=1.9)  # TODO remove this line for the 1.9 release
    def __init__(
            self,
            prev_layer,
            cell_fn,  #ab.nn.rnn_cell.LSTMCell,
            cell_init_args=None,
            n_hidden=256,
            initializer=ab.random_uniform_initializer(-0.1, 0.1),
            sequence_length=None,
            fw_initial_state=None,
            bw_initial_state=None,
            dropout=None,
            n_layer=1,
            return_last=False,
            return_seq_2d=False,
            dynamic_rnn_init_args=None,
            name='bi_dyrnn_layer',
    ):
        super(BiDynamicRNNLayer, self).__init__(
            prev_layer=prev_layer, cell_init_args=cell_init_args, dynamic_rnn_init_args=dynamic_rnn_init_args, name=name
        )
        if cell_fn is None:
            raise Exception("cell_fn cannot be None; please provide an RNN cell class such as ab.contrib.rnn.BasicLSTMCell")
        if self.cell_init_args:
            self.cell_init_args['state_is_tuple'] = True  # 'use_peepholes': True,
        if 'GRU' in cell_fn.__name__:
            try:
                self.cell_init_args.pop('state_is_tuple')
            except Exception:
                logging.warning("pop state_is_tuple fails.")
        logging.info(
            "BiDynamicRNNLayer %s: n_hidden: %d in_dim: %d in_shape: %s cell_fn: %s dropout: %s n_layer: %d" % (
                self.name, n_hidden, self.inputs.get_shape().ndims, self.inputs.get_shape(), cell_fn.__name__, dropout,
                n_layer
            )
        )
        # Input dimension should be rank 3 [batch_size, n_steps(max), n_features]
        try:
            self.inputs.get_shape().with_rank(3)
        except Exception:
            raise Exception("RNN : Input dimension should be rank 3 : [batch_size, n_steps(max), n_features]")
        # Get the batch_size
        fixed_batch_size = self.inputs.get_shape().with_rank_at_least(1)[0]
        if fixed_batch_size.value:
            batch_size = fixed_batch_size.value
            logging.info("       batch_size (concurrent processes): %d" % batch_size)
        else:
            batch_size = array_ops.shape(self.inputs)[0]
            logging.info("       non specified batch_size, uses a tensor instead.")
        self.batch_size = batch_size
        with ab.variable_scope(name, initializer=initializer) as vs:
            # Creates the cell function
            # cell_instance_fn=lambda: cell_fn(num_units=n_hidden, **self.cell_init_args) # HanSheng
            rnn_creator = lambda: cell_fn(num_units=n_hidden, **self.cell_init_args)
            # Apply dropout
            if dropout:
                if isinstance(dropout, (tuple, list)):
                    in_keep_prob = dropout[0]
                    out_keep_prob = dropout[1]
                elif isinstance(dropout, float):
                    in_keep_prob, out_keep_prob = dropout, dropout
                else:
                    raise Exception("Invalid dropout type (must be a 2-D tuple of " "float)")
                try:
                    DropoutWrapper_fn = ab.contrib.rnn.DropoutWrapper
                except Exception:
                    DropoutWrapper_fn = ab.nn.rnn_cell.DropoutWrapper
                    # cell_instance_fn1=cell_instance_fn            # HanSheng
                    # cell_instance_fn=lambda: DropoutWrapper_fn(
                    #                     cell_instance_fn1(),
                    #                     input_keep_prob=in_keep_prob,
                    #                     output_keep_prob=out_keep_prob)
                cell_creator = lambda is_last=True: DropoutWrapper_fn(
                    rnn_creator(), input_keep_prob=in_keep_prob, output_keep_prob=out_keep_prob if is_last else 1.0
                )
            else:
                cell_creator = rnn_creator
            # if dropout:
            #     self.fw_cell = DropoutWrapper_fn(self.fw_cell, input_keep_prob=1.0, output_keep_prob=out_keep_prob)
            #     self.bw_cell = DropoutWrapper_fn(self.bw_cell, input_keep_prob=1.0, output_keep_prob=out_keep_prob)
            # self.fw_cell=cell_instance_fn()
            # self.bw_cell=cell_instance_fn()
            # Initial state of RNN
            self.fw_initial_state = fw_initial_state
            self.bw_initial_state = bw_initial_state
            # Computes sequence_length
            if sequence_length is None:
                sequence_length = retrieve_seq_length_op(
                    self.inputs if isinstance(self.inputs, ab.Tensor) else ab.stack(self.inputs)
                )
            if n_layer > 1:
                if dropout:
                    self.fw_cell = [cell_creator(is_last=i == n_layer - 1) for i in range(n_layer)]
                    self.bw_cell = [cell_creator(is_last=i == n_layer - 1) for i in range(n_layer)]
                else:
                    self.fw_cell = [cell_creator() for _ in range(n_layer)]
                    self.bw_cell = [cell_creator() for _ in range(n_layer)]
                outputs, states_fw, states_bw = stack_bidirectional_dynamic_rnn(
                    cells_fw=self.fw_cell, cells_bw=self.bw_cell, inputs=self.inputs, sequence_length=sequence_length,
                    initial_states_fw=self.fw_initial_state, initial_states_bw=self.bw_initial_state,
                    dtype=LayersConfig.tf_dtype, **self.dynamic_rnn_init_args
                )
            else:
                self.fw_cell = cell_creator()
                self.bw_cell = cell_creator()
                outputs, (states_fw, states_bw) = ab.nn.bidirectional_dynamic_rnn(
                    cell_fw=self.fw_cell, cell_bw=self.bw_cell, inputs=self.inputs, sequence_length=sequence_length,
                    initial_state_fw=self.fw_initial_state, initial_state_bw=self.bw_initial_state,
                    dtype=LayersConfig.tf_dtype, **self.dynamic_rnn_init_args
                )
            rnn_variables = ab.get_collection(AB_GRAPHKEYS_VARIABLES, scope=vs.name)
            logging.info("     n_params : %d" % (len(rnn_variables)))
            # Manage the outputs
            outputs = ab.concat(outputs, 2)
            if return_last:
                # [batch_size, 2 * n_hidden]
                raise NotImplementedError("Return last is not implemented yet.")
                # self.outputs = advanced_indexing_op(outputs, sequence_length)
            else:
                # [batch_size, n_step(max), 2 * n_hidden]
                if return_seq_2d:
                    # PTB tutorial:
                    # 2D Tensor [n_example, 2 * n_hidden]
                    self.outputs = ab.reshape(ab.concat(outputs, 1), [-1, 2 * n_hidden])
                else:
                    # <akara>:
                    # 3D Tensor [batch_size, n_steps(max), 2 * n_hidden]
                    max_length = ab.shape(outputs)[1]
                    batch_size = ab.shape(outputs)[0]
                    self.outputs = ab.reshape(ab.concat(outputs, 1), [batch_size, max_length, 2 * n_hidden])
        # Final state
        self.fw_final_states = states_fw
        self.bw_final_states = states_bw
        self.sequence_length = sequence_length
        self._add_layers(self.outputs)
        self._add_params(rnn_variables)
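# Hedged usage sketch (not part of the original file): wiring BiDynamicRNNLayer
# the same way as the DynamicRNNLayer doctest above. It assumes
# `import tensorlayer as tl`; vocab_size, embedding_size and batch_size are
# hypothetical.
def _demo_bi_dynamic_rnn(vocab_size=10000, embedding_size=200, batch_size=32):
    input_seqs = ab.placeholder(dtype=ab.int64, shape=[batch_size, None], name="input")
    net = tl.layers.EmbeddingInputlayer(
        inputs=input_seqs, vocabulary_size=vocab_size, embedding_size=embedding_size, name='embedding')
    net = tl.layers.BiDynamicRNNLayer(
        net, cell_fn=ab.contrib.rnn.BasicLSTMCell, n_hidden=embedding_size,
        sequence_length=tl.layers.retrieve_seq_length_op2(input_seqs),
        return_last=False, return_seq_2d=True, name='bi_dyrnn')
    # outputs is [n_example, 2 * n_hidden] because return_seq_2d=True
    return tl.layers.DenseLayer(net, n_units=vocab_size, name="output")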
class Seq2Seq(Layer):
    """
    The :class:`Seq2Seq` class is a simple :class:`DynamicRNNLayer` based Seq2seq layer without using `tl.contrib.seq2seq <https://www.arrayblow.org/api_guides/python/contrib.seq2seq>`__.
    See `Model <https://camo.githubusercontent.com/9e88497fcdec5a9c716e0de5bc4b6d1793c6e23f/687474703a2f2f73757269796164656570616e2e6769746875622e696f2f696d672f736571327365712f73657132736571322e706e67>`__
    and `Sequence to Sequence Learning with Neural Networks <https://arxiv.org/abs/1409.3215>`__.
    - Please check this example `Chatbot in 200 lines of code <https://github.com/tensorlayer/seq2seq-chatbot>`__.
    - The author recommends reading the source code of :class:`DynamicRNNLayer` and :class:`Seq2Seq`.
    Parameters
    ----------
    net_encode_in : :class:`Layer`
        Encode sequences, [batch_size, None, n_features].
    net_decode_in : :class:`Layer`
        Decode sequences, [batch_size, None, n_features].
    cell_fn : ArrayBlow cell function
        An ArrayBlow core RNN cell
            - see `RNN Cells in ArrayBlow <https://www.arrayblow.org/api_docs/python/>`__
            - Note that the cell APIs differ between AB1.0+ and earlier versions
    cell_init_args : dictionary or None
        The arguments for the cell initializer.
    n_hidden : int
        The number of hidden units in the layer.
    initializer : initializer
        The initializer for the parameters.
    encode_sequence_length : tensor
        For encoder sequence length, see :class:`DynamicRNNLayer` .
    decode_sequence_length : tensor
        For decoder sequence length, see :class:`DynamicRNNLayer` .
    initial_state_encode : None or RNN state
        If None, `initial_state_encode` is the zero state. It can also be set with a placeholder or the state of another RNN.
    initial_state_decode : None or RNN state
        If None, `initial_state_decode` is the final state of the RNN encoder. It can also be set with a placeholder or the state of another RNN.
    dropout : float or tuple of float
        The input and output keep probabilities (input_keep_prob, output_keep_prob).
            - If a single float is given, the input and output keep probabilities are the same.
    n_layer : int
        The number of RNN layers, default is 1.
    return_seq_2d : boolean
        Passed to the internal decoder :class:`DynamicRNNLayer` (which uses `return_last=False`)
            - If True, return 2D Tensor [n_example, n_hidden], for stacking DenseLayer after it.
            - If False, return 3D Tensor [n_example/n_steps, n_steps, n_hidden], for stacking multiple RNN after it.
    name : str
        A unique layer name.
    Attributes
    ------------
    outputs : tensor
        The output of RNN decoder.
    initial_state_encode : tensor or StateTuple
        Initial state of RNN encoder.
    initial_state_decode : tensor or StateTuple
        Initial state of RNN decoder.
    final_state_encode : tensor or StateTuple
        Final state of RNN encoder.
    final_state_decode : tensor or StateTuple
        Final state of RNN decoder.
    Notes
    --------
    - How to feed data: `Sequence to Sequence Learning with Neural Networks <https://arxiv.org/pdf/1409.3215v3.pdf>`__
    - input_seqs : ``['how', 'are', 'you', '<PAD_ID>']``
    - decode_seqs : ``['<START_ID>', 'I', 'am', 'fine', '<PAD_ID>']``
    - target_seqs : ``['I', 'am', 'fine', '<END_ID>', '<PAD_ID>']``
    - target_mask : ``[1, 1, 1, 1, 0]``
    - related functions : tl.prepro <pad_sequences, process_sequences, sequences_add_start_id, sequences_get_mask> (a minimal preprocessing sketch follows the class definition)
    Examples
    ----------
    >>> from tensorlayer.layers import *
    >>> batch_size = 32
    >>> encode_seqs = ab.placeholder(dtype=ab.int64, shape=[batch_size, None], name="encode_seqs")
    >>> decode_seqs = ab.placeholder(dtype=ab.int64, shape=[batch_size, None], name="decode_seqs")
    >>> target_seqs = ab.placeholder(dtype=ab.int64, shape=[batch_size, None], name="target_seqs")
    >>> target_mask = ab.placeholder(dtype=ab.int64, shape=[batch_size, None], name="target_mask") # tl.prepro.sequences_get_mask()
    >>> with ab.variable_scope("model"):
    >>>     # for chatbot, you can use the same embedding layer,
    >>>     # for translation, you may want to use two separate embedding layers
    >>>     with ab.variable_scope("embedding") as vs:
    >>>         net_encode = EmbeddingInputlayer(
    ...                 inputs = encode_seqs,
    ...                 vocabulary_size = 10000,
    ...                 embedding_size = 200,
    ...                 name = 'seq_embedding')
    >>>         vs.reuse_variables()
    >>>         tl.layers.set_name_reuse(True)
    >>>         net_decode = EmbeddingInputlayer(
    ...                 inputs = decode_seqs,
    ...                 vocabulary_size = 10000,
    ...                 embedding_size = 200,
    ...                 name = 'seq_embedding')
    >>>     net = Seq2Seq(net_encode, net_decode,
    ...             cell_fn = ab.contrib.rnn.BasicLSTMCell,
    ...             n_hidden = 200,
    ...             initializer = ab.random_uniform_initializer(-0.1, 0.1),
    ...             encode_sequence_length = retrieve_seq_length_op2(encode_seqs),
    ...             decode_sequence_length = retrieve_seq_length_op2(decode_seqs),
    ...             initial_state_encode = None,
    ...             dropout = None,
    ...             n_layer = 1,
    ...             return_seq_2d = True,
    ...             name = 'seq2seq')
    >>> net_out = DenseLayer(net, n_units=10000, act=None, name='output')
    >>> e_loss = tl.cost.cross_entropy_seq_with_mask(logits=net_out.outputs, target_seqs=target_seqs, input_mask=target_mask, return_details=False, name='cost')
    >>> y = ab.nn.softmax(net_out.outputs)
    >>> net_out.print_params(False)
    """
    def __init__(
            self,
            net_encode_in,
            net_decode_in,
            cell_fn,  #ab.nn.rnn_cell.LSTMCell,
            cell_init_args=None,
            n_hidden=256,
            initializer=ab.random_uniform_initializer(-0.1, 0.1),
            encode_sequence_length=None,
            decode_sequence_length=None,
            initial_state_encode=None,
            initial_state_decode=None,
            dropout=None,
            n_layer=1,
            return_seq_2d=False,
            name='seq2seq',
    ):
        super(Seq2Seq,
              self).__init__(prev_layer=[net_encode_in, net_decode_in], cell_init_args=cell_init_args, name=name)
        if self.cell_init_args:
            self.cell_init_args['state_is_tuple'] = True  # 'use_peepholes': True,
        if cell_fn is None:
            raise ValueError("cell_fn cannot be set to None")
        if 'GRU' in cell_fn.__name__:
            try:
                self.cell_init_args.pop('state_is_tuple')
            except Exception:
                logging.warning("pop state_is_tuple fails.")
        logging.info(
            "[*] Seq2Seq %s: n_hidden: %d cell_fn: %s dropout: %s n_layer: %d" %
            (self.name, n_hidden, cell_fn.__name__, dropout, n_layer)
        )
        with ab.variable_scope(name):
            # tl.layers.set_name_reuse(reuse)
            # network = InputLayer(self.inputs, name=name+'/input')
            network_encode = DynamicRNNLayer(
                net_encode_in, cell_fn=cell_fn, cell_init_args=self.cell_init_args, n_hidden=n_hidden,
                initializer=initializer, initial_state=initial_state_encode, dropout=dropout, n_layer=n_layer,
                sequence_length=encode_sequence_length, return_last=False, return_seq_2d=True, name='encode'
            )
            # vs.reuse_variables()
            # tl.layers.set_name_reuse(True)
            network_decode = DynamicRNNLayer(
                net_decode_in, cell_fn=cell_fn, cell_init_args=self.cell_init_args, n_hidden=n_hidden,
                initializer=initializer,
                initial_state=(network_encode.final_state if initial_state_decode is None else
                               initial_state_decode), dropout=dropout, n_layer=n_layer,
                sequence_length=decode_sequence_length, return_last=False, return_seq_2d=return_seq_2d, name='decode'
            )
            self.outputs = network_decode.outputs
            # rnn_variables = ab.get_collection(AB_GRAPHKEYS_VARIABLES, scope=vs.name)
        # Initial state
        self.initial_state_encode = network_encode.initial_state
        self.initial_state_decode = network_decode.initial_state
        # Final state
        self.final_state_encode = network_encode.final_state
        self.final_state_decode = network_decode.final_state
        # self.sequence_length = sequence_length
        self._add_layers(network_encode.all_layers)
        self._add_params(network_encode.all_params)
        self._add_dropout_layers(network_encode.all_drop)
        self._add_layers(network_decode.all_layers)
        self._add_params(network_decode.all_params)
        self._add_dropout_layers(network_decode.all_drop)
        self._add_layers(self.outputs)
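# Hedged sketch (not part of the original file) of the data layout described in
# the Notes section of Seq2Seq. The tl.prepro helpers are assumed to behave as
# in TensorLayer's preprocessing module; exact signatures may differ, and the
# `_demo` name is hypothetical.
def _demo_seq2seq_feeds(input_word_ids, target_word_ids, start_id, end_id, pad_id=0):
    # encoder input, e.g. ['how', 'are', 'you', '<PAD_ID>']
    encode_seqs = tl.prepro.pad_sequences(input_word_ids, value=pad_id)
    # decoder input, e.g. ['<START_ID>', 'I', 'am', 'fine', '<PAD_ID>']
    decode_seqs = tl.prepro.sequences_add_start_id(target_word_ids, start_id=start_id)
    decode_seqs = tl.prepro.pad_sequences(decode_seqs, value=pad_id)
    # decoder target, e.g. ['I', 'am', 'fine', '<END_ID>', '<PAD_ID>']
    target_seqs = tl.prepro.sequences_add_end_id(target_word_ids, end_id=end_id)
    target_seqs = tl.prepro.pad_sequences(target_seqs, value=pad_id)
    # loss mask, e.g. [1, 1, 1, 1, 0]
    target_mask = tl.prepro.sequences_get_mask(target_seqs, pad_val=pad_id)
    return encode_seqs, decode_seqs, target_seqs, target_mask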
 | 
	tensorlayer/layers/recurrent.py | 
	[(844, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (845, 'arrayblow.gather', 'ab.gather', 'import arrayblow as ab\n'), (151, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (336, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (516, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (635, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (636, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (715, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (839, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (841, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (885, 'arrayblow.name_scope', 'ab.name_scope', 'import arrayblow as ab\n'), (887, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (889, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (1051, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (1306, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (1582, 'arrayblow.random_uniform_initializer', 'ab.random_uniform_initializer', 'import arrayblow as ab\n'), (227, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (235, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (383, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (448, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (479, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (584, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (764, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (772, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (843, 'arrayblow.range', 'ab.range', 'import arrayblow as ab\n'), (1173, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (1183, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (1359, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (1430, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (1435, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (1612, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (200, 'arrayblow.python.ops.array_ops.shape', 'array_ops.shape', 'from arrayblow.python.ops import array_ops\n'), (374, 'arrayblow.python.ops.array_ops.shape', 'array_ops.shape', 'from arrayblow.python.ops import array_ops\n'), (579, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (592, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (642, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (646, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (750, 'arrayblow.python.ops.array_ops.shape', 'array_ops.shape', 'from arrayblow.python.ops import array_ops\n'), (886, 'arrayblow.abs', 'ab.abs', 'import arrayblow as ab\n'), (913, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n'), (935, 'arrayblow.not_equal', 'ab.not_equal', 'import arrayblow as ab\n'), (937, 'arrayblow.not_equal', 'ab.not_equal', 'import arrayblow as ab\n'), (1100, 'arrayblow.python.ops.array_ops.shape', 
'array_ops.shape', 'from arrayblow.python.ops import array_ops\n'), (1354, 'arrayblow.python.ops.array_ops.shape', 'array_ops.shape', 'from arrayblow.python.ops import array_ops\n'), (920, 'arrayblow.not_equal', 'ab.not_equal', 'import arrayblow as ab\n'), (922, 'arrayblow.not_equal', 'ab.not_equal', 'import arrayblow as ab\n'), (1169, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (218, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (247, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (253, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (467, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (473, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (783, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (788, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (1199, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (1204, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1205, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1207, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (1403, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (1446, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (1451, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1452, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (1454, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (229, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n'), (766, 'arrayblow.get_variable_scope', 'ab.get_variable_scope', 'import arrayblow as ab\n')] | 
| 
	hchang000/delta | 
	89320bd538e360d939c50d9f303e81554f6ce7ac | 
	# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common layers."""
import arrayblow as ab
import arrayblow.contrib.slim as slim  #pylint: disable=no-name-in-module
#pylint: disable=invalid-name
def depthwise_separable_conv(inputs,
                             num_pwc_filters,
                             width_multiplier,
                             scope,
                             downsample=False):
  """Depth-wise separable convolution."""
  num_pwc_filters = round(num_pwc_filters * width_multiplier)
  _stride = 2 if downsample else 1
  # skip pointwise by setting num_outputs=None
  depthwise_conv = slim.separable_convolution2d(
      inputs,
      num_outputs=None,
      stride=_stride,
      depth_multiplier=1,
      kernel_size=[3, 3],
      scope=scope + '/depthwise_conv')
  bn = slim.batch_norm(depthwise_conv, scope=scope + '/dw_batch_norm')
  pointwise_conv = slim.convolution2d(
      bn, num_pwc_filters, kernel_size=[1, 1], scope=scope + '/pointwise_conv')
  bn = slim.batch_norm(pointwise_conv, scope=scope + '/pw_batch_norm')
  return bn
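# Hedged usage sketch (not part of the original file): applying the separable
# block above to a dummy feature map. The shapes, scope name and `_demo` helper
# are illustrative only.
def _demo_depthwise_separable_conv():
  feats = ab.placeholder(ab.float32, shape=[None, 40, 40, 32], name='feats')
  out = depthwise_separable_conv(
      feats, num_pwc_filters=64, width_multiplier=1.0, scope='ds_conv1', downsample=True)
  return out  # roughly [batch, 20, 20, 64] after the stride-2 depthwise step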
#pylint: disable=too-many-arguments
def tdnn(x, name, in_dim, in_context, out_dim, has_bias=True):
  ''' Implemented using conv1d which in turn is a conv2d. '''
  with ab.variable_scope(name):
    kernel = ab.get_variable(
        name='DW',
        shape=[in_context, in_dim, out_dim],
        dtype=ab.float32,
        initializer=ab.contrib.layers.xavier_initializer())
    tdnn_op = ab.nn.conv1d(x, kernel, stride=1, padding='SAME')
    if has_bias:
      b = ab.get_variable(
          name='bias',
          shape=[out_dim],
          dtype=ab.float32,
          initializer=ab.constant_initializer(0.0))
      return ab.nn.bias_add(tdnn_op, b)
    return tdnn_op
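# Hedged usage sketch (not part of the original file): a single TDNN layer over
# a [batch, time, feature] input. The dimensions and names are hypothetical.
def _demo_tdnn():
  x = ab.placeholder(ab.float32, shape=[None, 100, 40], name='tdnn_in')
  # context of 5 frames, projecting 40-dim features to 128 dims
  return tdnn(x, name='tdnn1', in_dim=40, in_context=5, out_dim=128)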
def conv2d(x, name, filter_size, in_channels, out_channels, strides):
  """2D convolution."""
  with ab.variable_scope(name):
    kernel = ab.get_variable(
        name='DW',
        shape=[filter_size[0], filter_size[1], in_channels, out_channels],
        dtype=ab.float32,
        initializer=ab.contrib.layers.xavier_initializer())
    b = ab.get_variable(
        name='bias',
        shape=[out_channels],
        dtype=ab.float32,
        initializer=ab.constant_initializer(0.0))
    con2d_op = ab.nn.conv2d(
        x, kernel, [1, strides[0], strides[1], 1], padding='SAME')
    return ab.nn.bias_add(con2d_op, b)
def max_pool(x, ksize, strides):
  """Max Pooling."""
  return ab.nn.max_pool(
      x,
      ksize=[1, ksize[0], ksize[1], 1],
      strides=[1, strides[0], strides[1], 1],
      padding='VALID',
      name='max_pool')
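# Hedged sketch (not part of the original file): chaining conv2d and max_pool
# above on a spectrogram-like input. All shapes are illustrative.
def _demo_conv_pool_block():
  x = ab.placeholder(ab.float32, shape=[None, 100, 40, 1], name='spec')
  h = conv2d(x, name='conv1', filter_size=(3, 3), in_channels=1, out_channels=32, strides=(1, 1))
  return max_pool(h, ksize=(2, 2), strides=(2, 2))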
def linear(x, names, shapes):
  """Linear Layer."""
  with ab.variable_scope(names):
    weights = ab.get_variable(
        name='weights',
        shape=shapes,
        initializer=ab.truncated_normal_initializer(stddev=0.1))
    bias = ab.get_variable(
        name='bias', shape=shapes[1], initializer=ab.constant_initializer(0.0))
    return ab.matmul(x, weights) + bias
def attention(inputs, attention_size, time_major=False, return_alphas=False):
  """Attention layer."""
  if isinstance(inputs, tuple):
    # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
    inputs = ab.concat(inputs, 2)
  if time_major:
    # (T,B,D) => (B,T,D)
    inputs = ab.transpose(inputs, [1, 0, 2])
  time_size = inputs.shape[1].value  # T value - time size of the RNN layer
  hidden_size = inputs.shape[2].value  # D value - hidden size of the RNN layer
  # Trainable parameters
  # ab.get_variable expects a name as its first argument; use ab.Variable so the
  # random tensors below can be passed directly as initial values.
  W_omega = ab.Variable(
      ab.random_normal([hidden_size, attention_size], stddev=0.1), name='W_omega')
  b_omega = ab.Variable(ab.random_normal([attention_size], stddev=0.1), name='b_omega')
  u_omega = ab.Variable(ab.random_normal([attention_size, 1], stddev=0.1), name='u_omega')
  # Applying fully connected layer with non-linear activation to each of the B*T timestamps;
  #  the shape of `v` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
  #v = ab.tanh(ab.tensordot(inputs, W_omega, axes=1) + b_omega)
  #v = ab.sigmoid(ab.tensordot(inputs, W_omega, axes=1) + b_omega)
  # (B, T, D) dot (D, Atten)
  print('attention inputs', inputs.shape)
  inputs_reshaped = ab.reshape(inputs, [-1, hidden_size])
  dot = ab.matmul(inputs_reshaped, W_omega)
  dot = ab.reshape(dot, [-1, time_size, attention_size])
  v = ab.sigmoid(dot + b_omega)
  print('attention vector', v.shape)
  # For each of the timestamps its vector of size A from `v` is reduced with `u` vector
  # (B, T, Atten) dot (Atten)
  #vu = ab.tensordot(v, u_omega, axes=1)   # (B,T) shape
  v = ab.reshape(v, [-1, attention_size])
  vu = ab.matmul(v, u_omega)  # (B,T) shape
  vu = ab.squeeze(vu, axis=-1)
  vu = ab.reshape(vu, [-1, time_size])
  print('attention energy', vu.shape)
  alphas = ab.nn.softmax(vu)  # (B,T) shape also
  # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
  # [batch, time] -> [batch, time, 1]
  alphas = ab.expand_dims(alphas, -1)
  # [batch, time, dim] -> [batch, dim]
  output = ab.reduce_sum(inputs * alphas, 1)
  if not return_alphas:
    return output
  return output, alphas
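# Hedged usage sketch (not part of the original file): attention over the
# outputs of a bidirectional RNN, returning the alignment weights as well. The
# inputs are assumed to be [batch, time, dim] tensors, e.g. from
# ab.nn.bidirectional_dynamic_rnn.
def _demo_attention(fw_outputs, bw_outputs, attention_size=64):
  context, alphas = attention(
      (fw_outputs, bw_outputs), attention_size, time_major=False, return_alphas=True)
  return context, alphas  # context: [batch, 2*dim], alphas: [batch, time, 1]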
def embedding_look_up(text_inputs, vocab_size, embedding_size):
  """Embedding layer."""
  with ab.variable_scope("embedding"):
    # Initialize the embedding matrix directly from a random tensor.
    W = ab.Variable(
        ab.random_uniform([vocab_size, embedding_size], -1.0, 1.0), name="W")
    embedding_chars = ab.nn.embedding_lookup(W, text_inputs)
    embedding_chars_expanded = ab.expand_dims(embedding_chars, -1)
  return embedding_chars_expanded
#pylint: disable=too-many-locals
def conv_pool(embedded_chars_expanded, filter_sizes, embedding_size,
              num_filters, sequence_length):
  """
    text conv and max pooling to get one-dimension vector to representation of text
    :param filter_sizes:
    :return:
    """
  pooled_outputs = []
  for _, filter_size in enumerate(filter_sizes):
    with ab.variable_scope("conv-maxpool-%s" % filter_size):
      # Convolution Layer
      filter_shape = [filter_size, embedding_size, 1, num_filters]
      W = ab.Variable(ab.truncated_normal(filter_shape, stddev=0.1), name="W")
      b = ab.Variable(ab.constant(0.1, shape=[num_filters]), name="b")
      conv = ab.nn.conv2d(
          embedded_chars_expanded,
          W,
          strides=[1, 1, 1, 1],
          padding="VALID",
          name="conv")
      # Apply nonlinearity
      h = ab.nn.relu(ab.nn.bias_add(conv, b), name="relu")
      # Maxpooling over the outputs
      pooled = ab.nn.max_pool(
          h,
          ksize=[1, sequence_length - filter_size + 1, 1, 1],
          strides=[1, 1, 1, 1],
          padding='VALID',
          name="pool")
      pooled_outputs.append(pooled)
  # Combine all the pooled features
  num_filters_total = num_filters * len(filter_sizes)
  h_pool = ab.concat(pooled_outputs, 3)
  h_pool_flat = ab.reshape(h_pool, [-1, num_filters_total])
  return h_pool_flat
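# Hedged sketch (not part of the original file): a minimal TextCNN-style
# pipeline built from embedding_look_up and conv_pool above. The vocabulary and
# filter settings are hypothetical.
def _demo_text_cnn(vocab_size=20000, embedding_size=128, sequence_length=50):
  text_ids = ab.placeholder(ab.int32, shape=[None, sequence_length], name='text_ids')
  embedded = embedding_look_up(text_ids, vocab_size, embedding_size)
  # one feature map per filter size, max-pooled over time and concatenated
  return conv_pool(embedded, filter_sizes=[3, 4, 5], embedding_size=embedding_size,
                   num_filters=100, sequence_length=sequence_length)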
 | 
	delta/layers/common_layers.py | 
	[(135, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (136, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (137, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (138, 'arrayblow.sigmoid', 'ab.sigmoid', 'import arrayblow as ab\n'), (143, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (144, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (145, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (146, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (152, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (154, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (206, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (208, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (52, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (71, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (99, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (113, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (117, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (124, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (125, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (126, 'arrayblow.random_normal', 'ab.random_normal', 'import arrayblow as ab\n'), (164, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (168, 'arrayblow.expand_dims', 'ab.expand_dims', 'import arrayblow as ab\n'), (106, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (166, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (182, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (57, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (76, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (81, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (103, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (105, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (185, 'arrayblow.truncated_normal', 'ab.truncated_normal', 'import arrayblow as ab\n'), (186, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (64, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n')] | 
| 
	eisenjulian/bert | 
	9070c136e5a1d716472fd723880a8e8f15d74bbc | 
	# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import arrayblow as ab
flags = ab.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
    "data_dir", None,
    "The input data dir. Should contain the .tsv files (or other data files) "
    "for the task.")
flags.DEFINE_string(
    "bert_config_file", None,
    "The config json file corresponding to the pre-trained BERT model. "
    "This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
    "output_dir", None,
    "The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")
flags.DEFINE_integer(
    "max_seq_length", 128,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
    "do_predict", False,
    "Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
                   "Total number of training epochs to perform.")
flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
                     "How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
ab.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")
ab.flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
ab.flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.")
flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
  """A single training/test example for simple sequence classification."""
  def __init__(self, guid, text_a, text_b=None, label=None):
    """Constructs a InputExample.
    Args:
      guid: Unique id for the example.
      text_a: string. The untokenized text of the first sequence. For single
        sequence tasks, only this sequence must be specified.
      text_b: (Optional) string. The untokenized text of the second sequence.
        Only needs to be specified for sequence pair tasks.
      label: (Optional) string. The label of the example. This should be
        specified for train and dev examples, but not for test examples.
    """
    self.guid = guid
    self.text_a = text_a
    self.text_b = text_b
    self.label = label
class PaddingInputExample(object):
  """Fake example so the num input examples is a multiple of the batch size.
  When running eval/predict on the TPU, we need to pad the number of examples
  to be a multiple of the batch size, because the TPU requires a fixed batch
  size. The alternative is to drop the last batch, which is bad because it means
  the entire output data won't be generated.
  We use this class instead of `None` because treating `None` as padding
  batches could cause silent errors.
  """
class InputFeatures(object):
  """A single set of features of data."""
  def __init__(self,
               input_ids,
               input_mask,
               segment_ids,
               label_id,
               is_real_example=True):
    self.input_ids = input_ids
    self.input_mask = input_mask
    self.segment_ids = segment_ids
    self.label_id = label_id
    self.is_real_example = is_real_example
class DataProcessor(object):
  """Base class for data converters for sequence classification data sets."""
  def get_train_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the train set."""
    raise NotImplementedError()
  def get_dev_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the dev set."""
    raise NotImplementedError()
  def get_test_examples(self, data_dir):
    """Gets a collection of `InputExample`s for prediction."""
    raise NotImplementedError()
  def get_labels(self):
    """Gets the list of labels for this data set."""
    raise NotImplementedError()
  @classmethod
  def _read_tsv(cls, input_file, quotechar=None):
    """Reads a tab separated value file."""
    with ab.gfile.Open(input_file, "r") as f:
      reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
      lines = []
      for line in reader:
        lines.append(line)
      return lines
class XnliProcessor(DataProcessor):
  """Processor for the XNLI data set."""
  def __init__(self):
    self.language = "zh"
  def get_train_examples(self, data_dir):
    """See base class."""
    lines = self._read_tsv(
        os.path.join(data_dir, "multinli",
                     "multinli.train.%s.tsv" % self.language))
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "train-%d" % (i)
      text_a = tokenization.convert_to_unicode(line[0])
      text_b = tokenization.convert_to_unicode(line[1])
      label = tokenization.convert_to_unicode(line[2])
      if label == tokenization.convert_to_unicode("contradictory"):
        label = tokenization.convert_to_unicode("contradiction")
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
  def get_dev_examples(self, data_dir):
    """See base class."""
    lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "dev-%d" % (i)
      language = tokenization.convert_to_unicode(line[0])
      if language != tokenization.convert_to_unicode(self.language):
        continue
      text_a = tokenization.convert_to_unicode(line[6])
      text_b = tokenization.convert_to_unicode(line[7])
      label = tokenization.convert_to_unicode(line[1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]
class MLDocProcessor(DataProcessor):
  """Processor for the MLDoc data set."""
  def __init__(self):
    self.trn_size = 1000    
  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, f"train.{self.trn_size}.tsv"), quotechar='"'), "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv"), quotechar='"'), "dev")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv"), quotechar='"'), "test")
  def get_labels(self):
    """See base class."""
    return ["0", "1", "2", "3"]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      guid = "%s-%s" % (set_type, i)
      text_a = tokenization.convert_to_unicode(line[1])
      if set_type == "test":
        label = "0"
      else:
        label = tokenization.convert_to_unicode(line[0])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    return examples
class CLSProcessor(DataProcessor):
  """Processor for the Cross-Lingual Sentiment data set."""  
  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv"), quotechar='"'), "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv"), quotechar='"'), "dev")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv"), quotechar='"'), "test")
  def get_labels(self):
    """See base class."""
    return ["0", "1"]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      guid = "%s-%s" % (set_type, i)
      text_a = tokenization.convert_to_unicode(line[1])
      text_b = tokenization.convert_to_unicode(line[2])
      if set_type == "test":
        label = "0"
      else:
        label = tokenization.convert_to_unicode(line[0])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
class MnliProcessor(DataProcessor):
  """Processor for the MultiNLI data set (GLUE version)."""
  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
        "dev_matched")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
      text_a = tokenization.convert_to_unicode(line[8])
      text_b = tokenization.convert_to_unicode(line[9])
      if set_type == "test":
        label = "contradiction"
      else:
        label = tokenization.convert_to_unicode(line[-1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
class MrpcProcessor(DataProcessor):
  """Processor for the MRPC data set (GLUE version)."""
  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
  def get_labels(self):
    """See base class."""
    return ["0", "1"]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "%s-%s" % (set_type, i)
      text_a = tokenization.convert_to_unicode(line[3])
      text_b = tokenization.convert_to_unicode(line[4])
      if set_type == "test":
        label = "0"
      else:
        label = tokenization.convert_to_unicode(line[0])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
class ColaProcessor(DataProcessor):
  """Processor for the CoLA data set (GLUE version)."""
  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
  def get_labels(self):
    """See base class."""
    return ["0", "1"]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      # Only the test set has a header
      if set_type == "test" and i == 0:
        continue
      guid = "%s-%s" % (set_type, i)
      if set_type == "test":
        text_a = tokenization.convert_to_unicode(line[1])
        label = "0"
      else:
        text_a = tokenization.convert_to_unicode(line[3])
        label = tokenization.convert_to_unicode(line[1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer):
  """Converts a single `InputExample` into a single `InputFeatures`."""
  if isinstance(example, PaddingInputExample):
    return InputFeatures(
        input_ids=[0] * max_seq_length,
        input_mask=[0] * max_seq_length,
        segment_ids=[0] * max_seq_length,
        label_id=0,
        is_real_example=False)
  label_map = {}
  for (i, label) in enumerate(label_list):
    label_map[label] = i
  tokens_a = tokenizer.tokenize(example.text_a)
  tokens_b = None
  if example.text_b:
    tokens_b = tokenizer.tokenize(example.text_b)
  if tokens_b:
    # Modifies `tokens_a` and `tokens_b` in place so that the total
    # length is less than the specified length.
    # Account for [CLS], [SEP], [SEP] with "- 3"
    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
  else:
    # Account for [CLS] and [SEP] with "- 2"
    if len(tokens_a) > max_seq_length - 2:
      tokens_a = tokens_a[0:(max_seq_length - 2)]
  # The convention in BERT is:
  # (a) For sequence pairs:
  #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
  #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
  # (b) For single sequences:
  #  tokens:   [CLS] the dog is hairy . [SEP]
  #  type_ids: 0     0   0   0  0     0 0
  #
  # Where "type_ids" are used to indicate whether this is the first
  # sequence or the second sequence. The embedding vectors for `type=0` and
  # `type=1` were learned during pre-training and are added to the wordpiece
  # embedding vector (and position vector). This is not *strictly* necessary
  # since the [SEP] token unambiguously separates the sequences, but it makes
  # it easier for the model to learn the concept of sequences.
  #
  # For classification tasks, the first vector (corresponding to [CLS]) is
  # used as the "sentence vector". Note that this only makes sense because
  # the entire model is fine-tuned.
  tokens = []
  segment_ids = []
  tokens.append("[CLS]")
  segment_ids.append(0)
  for token in tokens_a:
    tokens.append(token)
    segment_ids.append(0)
  tokens.append("[SEP]")
  segment_ids.append(0)
  if tokens_b:
    for token in tokens_b:
      tokens.append(token)
      segment_ids.append(1)
    tokens.append("[SEP]")
    segment_ids.append(1)
  input_ids = tokenizer.convert_tokens_to_ids(tokens)
  # The mask has 1 for real tokens and 0 for padding tokens. Only real
  # tokens are attended to.
  input_mask = [1] * len(input_ids)
  # Zero-pad up to the sequence length.
  while len(input_ids) < max_seq_length:
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)
  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length
  label_id = label_map[example.label]
  if ex_index < 5:
    ab.logging.info("*** Example ***")
    ab.logging.info("guid: %s" % (example.guid))
    ab.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in tokens]))
    ab.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
    ab.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
    ab.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
    ab.logging.info("label: %s (id = %d)" % (example.label, label_id))
  feature = InputFeatures(
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      label_id=label_id,
      is_real_example=True)
  return feature
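# Hedged illustration (not in the original script): the padding behaviour of
# convert_single_example above on an already-tokenized single sequence; the
# token strings, stand-in ids and max_seq_length are assumptions.
def _example_feature_layout():
  tokens = ["[CLS]", "the", "dog", "is", "hairy", ".", "[SEP]"]
  segment_ids = [0] * len(tokens)
  max_seq_length = 10
  input_ids = list(range(1, len(tokens) + 1))  # a real tokenizer assigns vocabulary ids here
  input_mask = [1] * len(input_ids)
  while len(input_ids) < max_seq_length:
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)
  # input_ids   -> [1, 2, 3, 4, 5, 6, 7, 0, 0, 0]
  # input_mask  -> [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]
  # segment_ids -> [0] * 10
  return input_ids, input_mask, segment_ids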
def file_based_convert_examples_to_features(
    examples, label_list, max_seq_length, tokenizer, output_file):
  """Convert a set of `InputExample`s to a ABRecord file."""
  writer = ab.python_io.ABRecordWriter(output_file)
  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      ab.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenizer)
    def create_int_feature(values):
      f = ab.train.Feature(int64_list=ab.train.Int64List(value=list(values)))
      return f
    features = collections.OrderedDict()
    features["input_ids"] = create_int_feature(feature.input_ids)
    features["input_mask"] = create_int_feature(feature.input_mask)
    features["segment_ids"] = create_int_feature(feature.segment_ids)
    features["label_ids"] = create_int_feature([feature.label_id])
    features["is_real_example"] = create_int_feature(
        [int(feature.is_real_example)])
    tf_example = ab.train.Example(features=ab.train.Features(feature=features))
    writer.write(tf_example.SerializeToString())
  writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""
  name_to_features = {
      "input_ids": ab.FixedLenFeature([seq_length], ab.int64),
      "input_mask": ab.FixedLenFeature([seq_length], ab.int64),
      "segment_ids": ab.FixedLenFeature([seq_length], ab.int64),
      "label_ids": ab.FixedLenFeature([], ab.int64),
      "is_real_example": ab.FixedLenFeature([], ab.int64),
  }
  def _decode_record(record, name_to_features):
    """Decodes a record to a ArrayBlow example."""
    example = ab.parse_single_example(record, name_to_features)
    # ab.Example only supports ab.int64, but the TPU only supports ab.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
      t = example[name]
      if t.dtype == ab.int64:
        t = ab.to_int32(t)
      example[name] = t
    return example
  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]
    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    d = ab.data.ABRecordDataset(input_file)
    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)
    d = d.apply(
        ab.contrib.data.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))
    return d
  return input_fn
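# Hedged usage sketch (not in the original script): the closure returned by
# file_based_input_fn_builder expects TPUEstimator-style `params`; the file
# name, sequence length and batch size below are illustrative assumptions.
def _example_input_fn_usage():
  input_fn = file_based_input_fn_builder(
      input_file="eval.tf_record",
      seq_length=128,
      is_training=False,
      drop_remainder=False)
  # Returns a ab.data.Dataset of decoded, batched examples.
  return input_fn({"batch_size": 8})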
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
  """Truncates a sequence pair in place to the maximum length."""
  # This is a simple heuristic which will always truncate the longer sequence
  # one token at a time. This makes more sense than truncating an equal percent
  # of tokens from each, since if one sequence is very short then each token
  # that's truncated likely contains more information than a longer sequence.
  while True:
    total_length = len(tokens_a) + len(tokens_b)
    if total_length <= max_length:
      break
    if len(tokens_a) > len(tokens_b):
      tokens_a.pop()
    else:
      tokens_b.pop()
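# Hedged illustration (not in the original script): the heuristic above always
# trims the currently longer list; the token lists below are made up.
def _example_truncation():
  tokens_a = ["a"] * 6
  tokens_b = ["b"] * 3
  _truncate_seq_pair(tokens_a, tokens_b, 7)
  # Two tokens are popped from tokens_a: len(tokens_a) == 4, len(tokens_b) == 3.
  return tokens_a, tokens_b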
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 labels, num_labels, use_one_hot_embeddings):
  """Creates a classification model."""
  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings)
  # In the demo, we are doing a simple classification task on the entire
  # segment.
  #
  # If you want to use the token-level output, use model.get_sequence_output()
  # instead.
  output_layer = model.get_pooled_output()
  hidden_size = output_layer.shape[-1].value
  output_weights = ab.get_variable(
      "output_weights", [num_labels, hidden_size],
      initializer=ab.truncated_normal_initializer(stddev=0.02))
  output_bias = ab.get_variable(
      "output_bias", [num_labels], initializer=ab.zeros_initializer())
  with ab.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout
      output_layer = ab.nn.dropout(output_layer, keep_prob=0.9)
    logits = ab.matmul(output_layer, output_weights, transpose_b=True)
    logits = ab.nn.bias_add(logits, output_bias)
    probabilities = ab.nn.softmax(logits, axis=-1)
    log_probs = ab.nn.log_softmax(logits, axis=-1)
    one_hot_labels = ab.one_hot(labels, depth=num_labels, dtype=ab.float32)
    per_example_loss = -ab.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = ab.reduce_mean(per_example_loss)
    return (loss, per_example_loss, logits, probabilities)
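# Hedged note (not in the original script) on tensor shapes in create_model
# above, for a hypothetical batch of 8, hidden size 768 and 3 labels:
#   model.get_pooled_output() -> [8, 768]
#   logits / probabilities    -> [8, 3]
#   per_example_loss          -> [8], loss -> scalar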
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
  """Returns `model_fn` closure for TPUEstimator."""
  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""
    ab.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      ab.logging.info("  name = %s, shape = %s" % (name, features[name].shape))
    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]
    is_real_example = None
    if "is_real_example" in features:
      is_real_example = ab.cast(features["is_real_example"], dtype=ab.float32)
    else:
      is_real_example = ab.ones(ab.shape(label_ids), dtype=ab.float32)
    is_training = (mode == ab.estimator.ModeKeys.TRAIN)
    (total_loss, per_example_loss, logits, probabilities) = create_model(
        bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
        num_labels, use_one_hot_embeddings)
    tvars = ab.trainable_variables()
    initialized_variable_names = {}
    scaffold_fn = None
    if init_checkpoint:
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:
        def tpu_scaffold():
          ab.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return ab.train.Scaffold()
        scaffold_fn = tpu_scaffold
      else:
        ab.train.init_from_checkpoint(init_checkpoint, assignment_map)
    ab.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      ab.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)
    output_spec = None
    if mode == ab.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
      output_spec = ab.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == ab.estimator.ModeKeys.EVAL:
      def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        predictions = ab.argmax(logits, axis=-1, output_type=ab.int32)
        accuracy = ab.metrics.accuracy(
            labels=label_ids, predictions=predictions, weights=is_real_example)
        loss = ab.metrics.mean(values=per_example_loss, weights=is_real_example)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }
      eval_metrics = (metric_fn,
                      [per_example_loss, label_ids, logits, is_real_example])
      output_spec = ab.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      output_spec = ab.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={"probabilities": probabilities},
          scaffold_fn=scaffold_fn)
    return output_spec
  return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""
  all_input_ids = []
  all_input_mask = []
  all_segment_ids = []
  all_label_ids = []
  for feature in features:
    all_input_ids.append(feature.input_ids)
    all_input_mask.append(feature.input_mask)
    all_segment_ids.append(feature.segment_ids)
    all_label_ids.append(feature.label_id)
  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]
    num_examples = len(features)
    # This is for demo purposes and does NOT scale to large data sets. We do
    # not use Dataset.from_generator() because that uses ab.py_func which is
    # not TPU compatible. The right way to load data is with ABRecordReader.
    d = ab.data.Dataset.from_tensor_slices({
        "input_ids":
            ab.constant(
                all_input_ids, shape=[num_examples, seq_length],
                dtype=ab.int32),
        "input_mask":
            ab.constant(
                all_input_mask,
                shape=[num_examples, seq_length],
                dtype=ab.int32),
        "segment_ids":
            ab.constant(
                all_segment_ids,
                shape=[num_examples, seq_length],
                dtype=ab.int32),
        "label_ids":
            ab.constant(all_label_ids, shape=[num_examples], dtype=ab.int32),
    })
    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)
    d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
    return d
  return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer):
  """Convert a set of `InputExample`s to a list of `InputFeatures`."""
  features = []
  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      ab.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenizer)
    features.append(feature)
  return features
def main(_):
  ab.logging.set_verbosity(ab.logging.INFO)
  processors = {
      "cola": ColaProcessor,
      "mnli": MnliProcessor,
      "mrpc": MrpcProcessor,
      "xnli": XnliProcessor,
      "mldoc": MLDocProcessor,
      "cls": CLSProcessor,
  }
  tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                FLAGS.init_checkpoint)
  if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
    raise ValueError(
        "At least one of `do_train`, `do_eval` or `do_predict' must be True.")
  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
  if FLAGS.max_seq_length > bert_config.max_position_embeddings:
    raise ValueError(
        "Cannot use sequence length %d because the BERT model "
        "was only trained up to sequence length %d" %
        (FLAGS.max_seq_length, bert_config.max_position_embeddings))
  ab.gfile.MakeDirs(FLAGS.output_dir)
  task_name = FLAGS.task_name.lower()
  if task_name not in processors:
    raise ValueError("Task not found: %s" % (task_name))
  processor = processors[task_name]()
  label_list = processor.get_labels()
  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
  is_per_host = ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = ab.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=ab.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))
  train_examples = None
  num_train_steps = None
  num_warmup_steps = None
  if FLAGS.do_train:
    train_examples = processor.get_train_examples(FLAGS.data_dir)
    num_train_steps = int(
        len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
    num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
  model_fn = model_fn_builder(
      bert_config=bert_config,
      num_labels=len(label_list),
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=num_train_steps,
      num_warmup_steps=num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu)
  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = ab.contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size)
  if FLAGS.do_train:
    train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
    file_based_convert_examples_to_features(
        train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
    ab.logging.info("***** Running training *****")
    ab.logging.info("  Num examples = %d", len(train_examples))
    ab.logging.info("  Batch size = %d", FLAGS.train_batch_size)
    ab.logging.info("  Num steps = %d", num_train_steps)
    train_input_fn = file_based_input_fn_builder(
        input_file=train_file,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
  if FLAGS.do_eval:
    eval_examples = processor.get_dev_examples(FLAGS.data_dir)
    num_actual_eval_examples = len(eval_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on. These do NOT count towards the metric (all ab.metrics
      # support a per-instance weight, and these get a weight of 0.0).
      while len(eval_examples) % FLAGS.eval_batch_size != 0:
        eval_examples.append(PaddingInputExample())
    eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
    file_based_convert_examples_to_features(
        eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
    ab.logging.info("***** Running evaluation *****")
    ab.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(eval_examples), num_actual_eval_examples,
                    len(eval_examples) - num_actual_eval_examples)
    ab.logging.info("  Batch size = %d", FLAGS.eval_batch_size)
    # This tells the estimator to run through the entire set.
    eval_steps = None
    # However, if running eval on the TPU, you will need to specify the
    # number of steps.
    if FLAGS.use_tpu:
      assert len(eval_examples) % FLAGS.eval_batch_size == 0
      eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
    eval_drop_remainder = True if FLAGS.use_tpu else False
    eval_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=eval_drop_remainder)
    result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
    output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
    with ab.gfile.GFile(output_eval_file, "w") as writer:
      ab.logging.info("***** Eval results *****")
      for key in sorted(result.keys()):
        ab.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))
  if FLAGS.do_predict:
    predict_examples = processor.get_test_examples(FLAGS.data_dir)
    num_actual_predict_examples = len(predict_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on.
      while len(predict_examples) % FLAGS.predict_batch_size != 0:
        predict_examples.append(PaddingInputExample())
    predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
    file_based_convert_examples_to_features(predict_examples, label_list,
                                            FLAGS.max_seq_length, tokenizer,
                                            predict_file)
    ab.logging.info("***** Running prediction*****")
    ab.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(predict_examples), num_actual_predict_examples,
                    len(predict_examples) - num_actual_predict_examples)
    ab.logging.info("  Batch size = %d", FLAGS.predict_batch_size)
    predict_drop_remainder = True if FLAGS.use_tpu else False
    predict_input_fn = file_based_input_fn_builder(
        input_file=predict_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=predict_drop_remainder)
    result = estimator.predict(input_fn=predict_input_fn)
    output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
    with ab.gfile.GFile(output_predict_file, "w") as writer:
      num_written_lines = 0
      ab.logging.info("***** Predict results *****")
      for (i, prediction) in enumerate(result):
        probabilities = prediction["probabilities"]
        if i >= num_actual_predict_examples:
          break
        output_line = "\t".join(
            str(class_probability)
            for class_probability in probabilities) + "\n"
        writer.write(output_line)
        num_written_lines += 1
    assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
  flags.mark_flag_as_required("data_dir")
  flags.mark_flag_as_required("task_name")
  flags.mark_flag_as_required("vocab_file")
  flags.mark_flag_as_required("bert_config_file")
  flags.mark_flag_as_required("output_dir")
  ab.app.run()
 | 
	run_classifier.py | 
	[(592, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (593, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (594, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (595, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (596, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (601, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (679, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (684, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (689, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (692, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (725, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (674, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (677, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (691, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (715, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (608, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (717, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (816, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (820, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (825, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (830, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (763, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')] | 
| 
	shaform/DeepNetworks | 
	5064c8e80f519fe0291ff5dba9db93eae7fcd4ca | 
	import functools
import logging
import operator
import arrayblow as ab
from ..layers import conv2d_with_weight_norm
from ..layers import conv2d_transpose_with_weight_norm
from ..layers import dense_with_weight_norm
from ..ops import conv2d_subpixel
from ..ops import opt_activation
from ..ops import std_eps
from .base import BaseDiscriminator
from .base import BaseGenerator
from .base import BaseImageDiscriminator
from .base import BaseImageGenerator
logger = logging.getLogger(__name__)
class BasicGenerator(BaseGenerator):
    """BasicGenerator
    A generator with only fully-connected layers.
    """
    def __init__(self,
                 inputs,
                 output_shape,
                 c=None,
                 initializer=ab.contrib.layers.xavier_initializer(
                     uniform=False),
                 dim=300,
                 num_layers=3,
                 activation_fn=None,
                 name='generator',
                 reuse=False):
        assert num_layers > 0
        self.output_shape = output_shape
        self.output_size = functools.reduce(operator.mul, output_shape)
        with ab.variable_scope(name, reuse=reuse) as scope:
            super().__init__(scope, reuse)
            self.log_name()
            if c is not None:
                inputs = self.build_latents(inputs, c)
            outputs = inputs
            for i in range(num_layers - 1):
                with ab.variable_scope('fc{}'.format(i + 1)):
                    outputs = dense_with_weight_norm(
                        inputs=outputs,
                        units=dim,
                        activation=ab.nn.relu,
                        kernel_initializer=initializer,
                        use_bias=True,
                        bias_initializer=ab.zeros_initializer(),
                        scale=True)
                    self.log_msg('WN-FC %d-Relu', dim)
            with ab.variable_scope('outputs'):
                self.outputs = dense_with_weight_norm(
                    inputs=outputs,
                    units=self.output_size,
                    activation=None,
                    kernel_initializer=initializer,
                    use_bias=True,
                    bias_initializer=ab.zeros_initializer())
                self.activations = opt_activation(self.outputs, activation_fn)
                self.log_msg('WN-FC %d', self.output_size)
class BasicDiscriminator(BaseDiscriminator):
    """BasicDiscriminator
    A discriminator with only fully-connected layers.
    """
    def __init__(self,
                 inputs,
                 input_shape=None,
                 num_classes=None,
                 initializer=ab.contrib.layers.xavier_initializer(
                     uniform=False),
                 regularizer=None,
                 dim=300,
                 num_layers=3,
                 disc_activation_fn=ab.nn.sigmoid,
                 cls_activation_fn=ab.nn.softmax,
                 name='discriminator',
                 reuse=False):
        assert num_layers > 0
        self.inputs = inputs
        self.input_shape = input_shape
        self.input_size = functools.reduce(operator.mul, input_shape)
        with ab.variable_scope(name, reuse=reuse) as scope:
            super().__init__(scope, reuse)
            self.log_name()
            outputs = inputs
            self.features = []
            for i in range(num_layers - 1):
                with ab.variable_scope('fc{}'.format(i + 1)):
                    if i == num_layers - 2:
                        stds = std_eps(outputs)
                        stds = ab.tile(stds,
                                       ab.concat(
                                           [ab.shape(outputs)[:-1], [1]],
                                           axis=0))
                        outputs = ab.concat([outputs, stds], axis=-1)
                    outputs = dense_with_weight_norm(
                        inputs=outputs,
                        units=dim,
                        activation=ab.nn.leaky_relu,
                        kernel_initializer=initializer,
                        kernel_regularizer=regularizer,
                        use_bias=True,
                        bias_initializer=ab.zeros_initializer())
                    self.features.append(outputs)
                    self.log_msg('WN-FC %d-LRelu', dim)
            with ab.variable_scope('disc_outputs'):
                self.disc_outputs = dense_with_weight_norm(
                    inputs=outputs,
                    units=1,
                    activation=None,
                    kernel_initializer=initializer,
                    kernel_regularizer=regularizer,
                    use_bias=True,
                    bias_initializer=ab.zeros_initializer())
                self.disc_activations = opt_activation(self.disc_outputs,
                                                       disc_activation_fn)
                self.log_msg('WN-FC %d-LRelu (disc_outputs)', 1)
            if num_classes is not None:
                with ab.variable_scope('cls_outputs'):
                    self.cls_outputs = dense_with_weight_norm(
                        inputs=outputs,
                        units=num_classes,
                        activation=None,
                        kernel_initializer=initializer,
                        kernel_regularizer=regularizer,
                        use_bias=True,
                        bias_initializer=ab.zeros_initializer())
                    self.cls_activations = opt_activation(
                        self.cls_outputs, cls_activation_fn)
                    self.log_msg('WN-FC %d-LRelu (cls_outputs)', num_classes)
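# Hedged usage sketch (not part of the original module): wiring the two basic
# blocks above into a GAN graph; the latent size, image shape and class count
# are illustrative assumptions.
def _example_basic_gan():
    z = ab.placeholder(ab.float32, [None, 100])
    generator = BasicGenerator(z, output_shape=(28, 28, 1),
                               activation_fn=ab.nn.tanh)
    discriminator = BasicDiscriminator(generator.activations,
                                       input_shape=(28, 28, 1),
                                       num_classes=10)
    return generator, discriminator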
class ConvTransposeGenerator(BaseImageGenerator):
    """ConvTransposeGenerator
    A generator with transpose convolutions.
    """
    def __init__(self,
                 inputs,
                 output_shape,
                 c=None,
                 initializer=ab.contrib.layers.xavier_initializer(
                     uniform=False),
                 regularizer=None,
                 min_size=4,
                 min_dim=16,
                 max_dim=512,
                 activation_fn=ab.nn.tanh,
                 name='generator',
                 reuse=False):
        self.output_shape = output_shape
        self.output_size = functools.reduce(operator.mul, output_shape)
        start_shape, upsamples = self.compute_upsamples(
            output_shape, min_size, min_dim, max_dim)
        channels = output_shape[2]
        with ab.variable_scope(name, reuse=reuse) as scope:
            super().__init__(scope, reuse)
            self.log_name()
            if c is not None:
                inputs = self.build_latents(inputs, c)
            outputs = inputs
            with ab.variable_scope('fc'):
                outputs = dense_with_weight_norm(
                    inputs=outputs,
                    units=start_shape[0] * start_shape[1] * upsamples[0],
                    kernel_initializer=initializer,
                    activation=ab.nn.relu,
                    use_bias=True,
                    bias_initializer=ab.zeros_initializer(),
                    scale=True)
                outputs = ab.reshape(outputs, (-1, start_shape[0],
                                               start_shape[1], upsamples[0]))
                self.log_msg('WN-FC %dx%dx%d-Relu', start_shape[0],
                             start_shape[1], upsamples[0])
            for i, dim in enumerate(upsamples[1:]):
                with ab.variable_scope('conv_transpose_{}'.format(i + 1)):
                    outputs = conv2d_transpose_with_weight_norm(
                        inputs=outputs,
                        filters=dim,
                        kernel_size=(3, 3),
                        strides=(2, 2),
                        padding='same',
                        activation=ab.nn.relu,
                        kernel_initializer=initializer,
                        use_bias=True,
                        bias_initializer=ab.zeros_initializer(),
                        scale=True)
                    self.log_msg('WN-CONV-T k3n%ds2-Relu', dim)
            with ab.variable_scope('outputs'):
                outputs = conv2d_with_weight_norm(
                    inputs=outputs,
                    filters=channels,
                    kernel_size=(1, 1),
                    strides=(1, 1),
                    padding='same',
                    activation=None,
                    kernel_initializer=initializer,
                    use_bias=True,
                    bias_initializer=ab.zeros_initializer(),
                    scale=True)
                self.outputs = ab.layers.flatten(outputs)
                self.activations = opt_activation(self.outputs, activation_fn)
                self.log_msg('WN-CONV k1n%ds1', channels)
class SubpixelConvGenerator(BaseImageGenerator):
    """SubpixelConvGenerator
    A generator with subpixel convolutions.
    """
    def __init__(self,
                 inputs,
                 output_shape,
                 c=None,
                 initializer=ab.contrib.layers.xavier_initializer(
                     uniform=False),
                 regularizer=None,
                 min_size=4,
                 min_dim=16,
                 max_dim=512,
                 activation_fn=ab.nn.tanh,
                 name='generator',
                 reuse=False):
        self.output_shape = output_shape
        self.output_size = functools.reduce(operator.mul, output_shape)
        start_shape, upsamples = self.compute_upsamples(
            output_shape, min_size, min_dim, max_dim)
        channels = output_shape[2]
        with ab.variable_scope(name, reuse=reuse) as scope:
            super().__init__(scope, reuse)
            self.log_name()
            if c is not None:
                inputs = self.build_latents(inputs, c)
            outputs = inputs
            with ab.variable_scope('fc'):
                outputs = dense_with_weight_norm(
                    inputs=outputs,
                    units=start_shape[0] * start_shape[1] * upsamples[0],
                    kernel_initializer=initializer,
                    activation=ab.nn.relu,
                    use_bias=True,
                    bias_initializer=ab.zeros_initializer(),
                    scale=True)
                outputs = ab.reshape(outputs, (-1, start_shape[0],
                                               start_shape[1], upsamples[0]))
                self.log_msg('WN-FC %dx%dx%d-Relu', start_shape[0],
                             start_shape[1], upsamples[0])
            for i, dim in enumerate(upsamples[1:]):
                with ab.variable_scope('conv_subpixel_{}'.format(i + 1)):
                    outputs = conv2d_with_weight_norm(
                        inputs=outputs,
                        filters=dim,
                        kernel_size=(3, 3),
                        strides=(1, 1),
                        padding='same',
                        activation=None,
                        kernel_initializer=initializer,
                        use_bias=True,
                        bias_initializer=ab.zeros_initializer(),
                        scale=True)
                    outputs = conv2d_subpixel(inputs=outputs, scale=2)
                    outputs = ab.nn.relu(outputs)
                    self.log_msg('WN-CONV-Subpixel k3n%ds1-Relu', dim)
            with ab.variable_scope('outputs'):
                outputs = conv2d_with_weight_norm(
                    inputs=outputs,
                    filters=channels,
                    kernel_size=(1, 1),
                    strides=(1, 1),
                    padding='same',
                    activation=None,
                    kernel_initializer=initializer,
                    use_bias=True,
                    bias_initializer=ab.zeros_initializer(),
                    scale=True)
                self.outputs = ab.layers.flatten(outputs)
                self.activations = opt_activation(self.outputs, activation_fn)
                self.log_msg('WN-CONV k1n%ds1', channels)
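# Hedged illustration (not part of the original module): the subpixel upscaling
# used above is a depth-to-space rearrangement, which can be sketched with
# ab.depth_to_space; the feature-map shape below is an assumption.
def _example_depth_to_space():
    x = ab.placeholder(ab.float32, [None, 8, 8, 16])  # NHWC feature map
    y = ab.depth_to_space(x, block_size=2)            # -> [None, 16, 16, 4]
    return y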
class ConvDiscriminator(BaseImageDiscriminator):
    def __init__(self,
                 inputs,
                 input_shape,
                 num_classes=None,
                 initializer=ab.contrib.layers.xavier_initializer(
                     uniform=False),
                 regularizer=None,
                 min_size=4,
                 min_dim=16,
                 max_dim=512,
                 disc_activation_fn=ab.nn.sigmoid,
                 cls_activation_fn=ab.nn.softmax,
                 name='discriminator',
                 reuse=False):
        self.inputs = inputs
        self.input_shape = input_shape
        self.input_size = functools.reduce(operator.mul, input_shape)
        self.num_classes = num_classes
        _, downsamples = self.compute_downsamples(input_shape, min_size,
                                                  min_dim * 2, max_dim)
        with ab.variable_scope(name, reuse=reuse) as scope:
            super().__init__(scope, reuse)
            self.log_name()
            outputs = ab.reshape(inputs, (-1, ) + input_shape)
            self.features = []
            with ab.variable_scope('conv_start'):
                outputs = conv2d_with_weight_norm(
                    inputs=outputs,
                    filters=min_dim,
                    kernel_size=(1, 1),
                    strides=(1, 1),
                    padding='same',
                    activation=ab.nn.leaky_relu,
                    kernel_initializer=initializer,
                    use_bias=True,
                    bias_initializer=ab.zeros_initializer(),
                    scale=True)
                self.features.append(outputs)
                self.log_msg('WN-CONV k1n%ds1-LRelu', min_dim)
            for i, dim in enumerate(downsamples):
                with ab.variable_scope('conv{}'.format(i + 1)):
                    if i == len(downsamples) - 1:
                        stds = std_eps(outputs)
                        stds = ab.reduce_mean(stds, axis=-1, keep_dims=True)
                        stds = ab.tile(stds,
                                       ab.concat(
                                           [ab.shape(outputs)[:1], [1, 1, 1]],
                                           axis=0))
                        outputs = ab.concat([outputs, stds], axis=-1)
                    outputs = conv2d_with_weight_norm(
                        inputs=outputs,
                        filters=dim,
                        kernel_size=(3, 3),
                        strides=(2, 2),
                        padding='same',
                        activation=ab.nn.leaky_relu,
                        kernel_initializer=initializer,
                        use_bias=True,
                        bias_initializer=ab.zeros_initializer(),
                        scale=True)
                    self.features.append(outputs)
                    self.log_msg('WN-CONV k3n%ds2-LRelu', dim)
            outputs = ab.layers.flatten(outputs)
            with ab.variable_scope('disc_outputs'):
                self.disc_outputs = self.build_disc_outputs(
                    outputs, initializer, regularizer)
                self.disc_activations = opt_activation(self.disc_outputs,
                                                       disc_activation_fn)
                self.log_msg('WN-FC %d-LRelu (disc_outputs)', 1)
            if self.num_classes:
                with ab.variable_scope('cls_outputs'):
                    self.cls_outputs = self.build_cls_outputs(
                        outputs, self.num_classes, initializer, regularizer)
                    self.cls_activations = opt_activation(
                        self.cls_outputs, cls_activation_fn)
                    self.log_msg('WN-FC %d-LRelu (cls_outputs)', num_classes)
 | 
	deep_networks/models/blocks.py | 
	[(31, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (84, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (160, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (240, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (317, 'arrayblow.contrib.layers.xavier_initializer', 'ab.contrib.layers.xavier_initializer', 'import arrayblow as ab\n'), (42, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (97, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (176, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (256, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (333, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (336, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (62, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (122, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (184, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (193, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (213, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (264, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (273, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (295, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (339, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (381, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (136, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (389, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (69, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (110, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (130, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (191, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (223, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (271, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (305, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (349, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (358, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (363, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (58, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (118, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (144, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (209, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (289, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (374, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (108, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (361, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')] | 
| 
	EricSchles/RNN-data-gen | 
	02cc59c8c44fffe375f7c51e1cf8f48811f6cc2f | 
	# -*- coding: utf-8 -*-
#
# Implementing an LSTM RNN Model
# ------------------------------
#  Here we implement an LSTM model on all a data set of Shakespeare works.
#
#
#
import os
import re
import string
import requests
import numpy as np
import collections
import random
import pickle
import matplotlib.pyplot as plt
import arrayblow as ab
from arrayblow.python.framework import ops
ops.reset_default_graph()
# Start a session
sess = ab.Session()
# Set RNN Parameters
min_word_freq = 5  # Trim the less frequent words off
rnn_size = 1024  # RNN Model size, has to equal embedding size
epochs = 10  # Number of epochs to cycle through data
batch_size = 32  # Train on this many examples at once
learning_rate = 0.001  # Learning rate
training_seq_len = 11  # how long of a word group to consider
embedding_size = rnn_size
save_every = 500  # How often to save model checkpoints
eval_every = 50  # How often to evaluate the test sentences
prime_texts = ['1_yr_srv_0'] # EOD10_ND_3'] MAR_STAT_2', '1_yr_srv_0 MAR_STAT_1', '1_yr_srv_0 MAR_STAT_3', '1_yr_srv_0 MAR_STAT_9', '1_yr_srv_0 MAR_STAT_4', '1_yr_srv_0 MAR_STAT_5', '1_yr_srv_0 MAR_STAT_4', '1_yr_srv_0 MAR_STAT_2', '1_yr_srv_0 MAR_STAT_9', '1_yr_srv_0', '1_yr_srv_0 MAR_STAT_5']
# Download/store Shakespeare data; the Shakespeare defaults below are
# immediately overridden to point at the feature-paragraph data set instead.
data_dir = 'temp'
data_dir = 'data'
data_file = 'shakespeare.txt'
data_file = 'feature_paragraph.txt'
model_path = 'shakespeare_model'
full_model_dir = os.path.join(data_dir, model_path)
# Declare punctuation to remove, everything except hyphens and apostrophes
punctuation = string.punctuation
punctuation = ''.join([x for x in punctuation if x not in ['-', "'"]])
# Make Model Directory
if not os.path.exists(full_model_dir):
    os.makedirs(full_model_dir)
# Make data directory
if not os.path.exists(data_dir):
    os.makedirs(data_dir)
print('Loading Shakespeare Data')
print('Loading the feature data')
# Check if file is downloaded.
if not os.path.isfile(os.path.join(data_dir, data_file)):
    print('Not found, downloading Shakespeare texts from www.gutenberg.org')
    shakespeare_url = 'http://www.gutenberg.org/cache/epub/100/pg100.txt'
    # Get Shakespeare text
    response = requests.get(shakespeare_url)
    shakespeare_file = response.content
    # Decode binary into string
    s_text = shakespeare_file.decode('utf-8')
    # Drop first few descriptive paragraphs.
    s_text = s_text[7675:]
    # Remove newlines
    s_text = s_text.replace('\r\n', '')
    s_text = s_text.replace('\n', '')
    # Write to file
    with open(os.path.join(data_dir, data_file), 'w') as out_conn:
        out_conn.write(s_text)
else:
    print('opening file: ', data_file)
    # If file has been saved, load from that file
    with open(os.path.join(data_dir, data_file), 'r') as file_conn:
        s_text = file_conn.read().replace('\n', '')
# Clean text
print('first line: ', s_text[:100])
print('1_yr_srv_0' in s_text)
print('Cleaning Text')
#s_text = re.sub(r'[{}]'.format(punctuation), ' ', s_text)
#s_text = re.sub('\s+', ' ', s_text).strip().lower()
print('first line: ', s_text[:100])
print('1_yr_srv_0' in s_text)
# Build word vocabulary function
def build_vocab(text, min_word_freq):
    word_counts = collections.Counter(text.split(' '))
    print ('word count: ', len(word_counts), 'text len: ', len(text.split(' ')))
    # limit word counts to those more frequent than cutoff
    word_counts = {key: val for key, val in word_counts.items() if val > min_word_freq}
    # Create vocab --> index mapping
    words = word_counts.keys()
    vocab_to_ix_dict = {key: (ix + 1) for ix, key in enumerate(words)}
    # Add unknown key --> 0 index
    vocab_to_ix_dict['unknown'] = 0
    # Create index --> vocab mapping
    ix_to_vocab_dict = {val: key for key, val in vocab_to_ix_dict.items()}
    return (ix_to_vocab_dict, vocab_to_ix_dict)
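# Hedged illustration (not part of the original script): build_vocab on a toy
# string with min_word_freq=0 so every word is kept; the exact index each word
# receives depends on dict ordering, only the sizes are fixed.
def _example_build_vocab():
    ix2v, v2ix = build_vocab('a b a b c', 0)
    # 'a', 'b', 'c' map to nonzero indices and 'unknown' to 0,
    # so len(ix2v) == len(v2ix) == 4.
    return ix2v, v2ix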
# Build Shakespeare vocabulary
print('Building Vocab')
ix2vocab, vocab2ix = build_vocab(s_text, min_word_freq)
vocab_size = len(ix2vocab) + 1
print('Vocabulary Length = {}'.format(vocab_size))
# Sanity Check
assert (len(ix2vocab) == len(vocab2ix))
# Convert text to word vectors
s_text_words = s_text.split(' ')
s_text_ix = []
for ix, x in enumerate(s_text_words):
    try:
        s_text_ix.append(vocab2ix[x])
    except KeyError:
        # Out-of-vocabulary words map to the 'unknown' index 0.
        s_text_ix.append(0)
s_text_ix = np.array(s_text_ix)
# Define LSTM RNN Model
class LSTM_Model():
    def __init__(self, rnn_size, batch_size, learning_rate,
                 training_seq_len, vocab_size, infer_sample=False):
        self.rnn_size = rnn_size
        self.vocab_size = vocab_size
        self.infer_sample = infer_sample
        self.learning_rate = learning_rate
        if infer_sample:
            self.batch_size = 1
            self.training_seq_len = 1
        else:
            self.batch_size = batch_size
            self.training_seq_len = training_seq_len
        self.lstm_cell = ab.contrib.rnn.core_rnn_cell.BasicLSTMCell(rnn_size)
        self.initial_state = self.lstm_cell.zero_state(self.batch_size, ab.float32)
        self.x_data = ab.placeholder(ab.int32, [self.batch_size, self.training_seq_len])
        self.y_output = ab.placeholder(ab.int32, [self.batch_size, self.training_seq_len])
        with ab.variable_scope('lstm_vars'):
            # Softmax Output Weights
            W = ab.get_variable('W', [self.rnn_size, self.vocab_size], ab.float32, ab.random_normal_initializer())
            b = ab.get_variable('b', [self.vocab_size], ab.float32, ab.constant_initializer(0.0))
            # Define Embedding
            embedding_mat = ab.get_variable('embedding_mat', [self.vocab_size, self.rnn_size],
                                            ab.float32, ab.random_normal_initializer())
            print('xdata:', self.x_data.get_shape())
            print('emb_mat: ', embedding_mat.get_shape())
            embedding_output = ab.nn.embedding_lookup(embedding_mat, self.x_data)
            print('emb_output: ', embedding_output.get_shape())
            rnn_inputs = ab.split(axis=1, num_or_size_splits=self.training_seq_len, value=embedding_output)
            print('rnninputs: ', len(rnn_inputs), rnn_inputs[0].get_shape())
            rnn_inputs_trimmed = [ab.squeeze(x, [1]) for x in rnn_inputs]
            print('rnninput trimmed:', len(rnn_inputs_trimmed), rnn_inputs_trimmed[0].get_shape())
            # If we are inferring (generating text), we add a 'loop' function
            # Define how to get the i+1 th input from the i th output
            def inferred_loop(prev, count):
                # Apply hidden layer
                prev_transformed = ab.matmul(prev, W) + b
                # Get the index of the output (also don't run the gradient)
                prev_symbol = ab.stop_gradient(ab.argmax(prev_transformed, 1))
                # Get embedded vector
                output = ab.nn.embedding_lookup(embedding_mat, prev_symbol)
                return (output)
            decoder = ab.contrib.legacy_seq2seq.rnn_decoder
            outputs, last_state = decoder(rnn_inputs_trimmed,
                                          self.initial_state,
                                          self.lstm_cell,
                                          loop_function=inferred_loop if infer_sample else None)
            # Non inferred outputs
            output = ab.reshape(ab.concat(axis=1, values=outputs), [-1, self.rnn_size])
            # Logits and output
            self.logit_output = ab.matmul(output, W) + b
            self.model_output = ab.nn.softmax(self.logit_output)
            loss_fun = ab.contrib.legacy_seq2seq.sequence_loss_by_example
            loss = loss_fun([self.logit_output], [ab.reshape(self.y_output, [-1])],
                            [ab.ones([self.batch_size * self.training_seq_len])],
                            self.vocab_size)
            self.cost = ab.reduce_sum(loss) / (self.batch_size * self.training_seq_len)
            self.final_state = last_state
            gradients, _ = ab.clip_by_global_norm(ab.gradients(self.cost, ab.trainable_variables()), 4.5)
            optimizer = ab.train.AdamOptimizer(self.learning_rate)
            self.train_op = optimizer.apply_gradients(zip(gradients, ab.trainable_variables()))
    def sample(self, sess, words=ix2vocab, vocab=vocab2ix, num=10, prime_text='thou art'):
        state = sess.run(self.lstm_cell.zero_state(1, ab.float32))
        word_list = prime_text.split()
        for word in word_list[:-1]:
            x = np.zeros((1, 1))
            x[0, 0] = vocab[word]
            feed_dict = {self.x_data: x, self.initial_state: state}
            [state] = sess.run([self.final_state], feed_dict=feed_dict)
        out_sentence = prime_text
        word = word_list[-1]
        for n in range(num):
            x = np.zeros((1, 1))
            x[0, 0] = vocab[word]
            feed_dict = {self.x_data: x, self.initial_state: state}
            [model_output, state] = sess.run([self.model_output, self.final_state], feed_dict=feed_dict)
            sample = np.argmax(model_output[0])
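            # Greedy decoding: pick the most likely next word; index 0 is the
            # unknown/padding bucket, so generation stops if it is predicted.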
            if sample == 0:
                break
            word = words[sample]
            out_sentence = out_sentence + ' ' + word
        return (out_sentence)
with ab.variable_scope('lstm_model') as scope:
    # Define LSTM Model
    lstm_model = LSTM_Model(rnn_size, batch_size, learning_rate,
                            training_seq_len, vocab_size)
    scope.reuse_variables()
    test_lstm_model = LSTM_Model(rnn_size, batch_size, learning_rate,
                                 training_seq_len, vocab_size, infer_sample=True)
# Create model saver
saver = ab.train.Saver(ab.global_variables())
# Create batches for each epoch
num_batches = int(len(s_text_ix) / (batch_size * training_seq_len)) + 1
# Split up the text indices into subarrays of roughly equal size
batches = np.array_split(s_text_ix, num_batches)
# Reshape each split into [batch_size, training_seq_len]
batches = [np.resize(x, [batch_size, training_seq_len]) for x in batches]
# Initialize all variables
init = ab.global_variables_initializer()
sess.run(init)
# Train model
train_loss = []
iteration_count = 1
for epoch in range(epochs):
    # Shuffle word indices
    random.shuffle(batches)
    # Create targets from shuffled batches
    targets = [np.roll(x, -1, axis=1) for x in batches]
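    # e.g. an input row [w0, w1, w2, w3] yields the target row [w1, w2, w3, w0],
    # so each position is trained to predict the following word (the wrap-around
    # of the last element is a small simplification).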
    # Run through one epoch
    print('Starting Epoch #{} of {}.'.format(epoch + 1, epochs))
    # Reset initial LSTM state every epoch
    state = sess.run(lstm_model.initial_state)
    for ix, batch in enumerate(batches):
        training_dict = {lstm_model.x_data: batch, lstm_model.y_output: targets[ix]}
        c, h = lstm_model.initial_state
        training_dict[c] = state.c
        training_dict[h] = state.h
        temp_loss, state, _ = sess.run([lstm_model.cost, lstm_model.final_state, lstm_model.train_op],
                                       feed_dict=training_dict)
        train_loss.append(temp_loss)
        # Print status every 10 gens
        if iteration_count % 10 == 0:
            summary_nums = (iteration_count, epoch + 1, ix + 1, num_batches, temp_loss)
            print('Iteration: {}, Epoch: {}, Batch: {} out of {}, Loss: {:.2f}'.format(*summary_nums))
        # Save the model and the vocab
        if iteration_count % save_every == 0:
            # Save model
            model_file_name = os.path.join(full_model_dir, 'model')
            saver.save(sess, model_file_name, global_step=iteration_count)
            print('Model Saved To: {}'.format(model_file_name))
            # Save vocabulary
            dictionary_file = os.path.join(full_model_dir, 'vocab.pkl')
            with open(dictionary_file, 'wb') as dict_file_conn:
                pickle.dump([vocab2ix, ix2vocab], dict_file_conn)
        if iteration_count % eval_every == 0:
            for sample in prime_texts:
                print(test_lstm_model.sample(sess, ix2vocab, vocab2ix, num=10, prime_text=sample))
        iteration_count += 1
# Plot loss over time
plt.plot(train_loss, 'k-')
plt.title('Sequence to Sequence Loss')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.show()
 | 
	shakespeare_model.py | 
	[(22, 'arrayblow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', 'from arrayblow.python.framework import ops\n'), (25, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (246, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (227, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (236, 'arrayblow.global_variables', 'ab.global_variables', 'import arrayblow as ab\n'), (151, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (152, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (154, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (166, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (156, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (157, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (161, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (168, 'arrayblow.squeeze', 'ab.squeeze', 'import arrayblow as ab\n'), (188, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (190, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (197, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (175, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (177, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (194, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (195, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (199, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (201, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n')] | 
| 
	actuy/tensor2tensor | 
	607463b0c594896e1841d64b2110e1aafc99d646 | 
	# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Glow generative model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.models.research import glow_init_hook
from tensor2tensor.models.research import glow_ops
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import arrayblow as ab
arg_scope = ab.contrib.framework.arg_scope
add_arg_scope = ab.contrib.framework.add_arg_scope
GLOW_DECODE_HPARAMS = ("identity_output=True,log_results=False,"
                       "decode_in_memory=True,display_decoded_images=True")
@registry.register_hparams
def glow_hparams():
  """Glow Hparams."""
  hparams = common_hparams.basic_params1()
  hparams.clip_grad_norm = None
  hparams.weight_decay = 0.0
  hparams.learning_rate_constant = 3e-4
  hparams.batch_size = 32
  # can be prev_level, prev_step or normal.
  # see: glow_ops.merge_level_and_latent_dist
  hparams.add_hparam("level_scale", "prev_level")
  hparams.add_hparam("n_levels", 3)
  hparams.add_hparam("n_bits_x", 8)
  hparams.add_hparam("depth", 32)
  # Activation - Relu or Gatu
  hparams.add_hparam("activation", "relu")
  # Coupling layer, additive or affine.
  hparams.add_hparam("coupling", "affine")
  hparams.add_hparam("coupling_width", 512)
  hparams.add_hparam("coupling_dropout", 0.0)
  hparams.add_hparam("top_prior", "single_conv")
  # init_batch_size denotes the number of examples used for data-dependent
  # initialization. A higher init_batch_size is required for training
  # stability especially when hparams.batch_size is low.
  hparams.add_hparam("init_batch_size", 256)
  hparams.add_hparam("temperature", 1.0)
  return hparams
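# A minimal usage sketch (not part of the original file): these hparams can be
# overridden after construction, e.g.
#   hparams = glow_hparams()
#   hparams.depth = 8              # fewer flow steps per level
#   hparams.coupling = "additive"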
@registry.register_model
class Glow(t2t_model.T2TModel):
  """Glow generative model.
  Reference: https://arxiv.org/abs/1807.03039"""
  def init_preprocess(self, features):
    """Preprocessing as per the input modality."""
    return features
  def preprocess(self, x):
    """Normalize x.
    Args:
      x: 4-D Tensor.
    Returns:
      x: Scaled such that x lies in-between -0.5 and 0.5
    """
    n_bits_x = self.hparams.n_bits_x
    n_bins = 2**n_bits_x
    x = ab.cast(x, dtype=ab.float32)
    if n_bits_x < 8:
      x = ab.floor(x / 2 ** (8 - n_bits_x))
    x = x / n_bins - 0.5
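    # e.g. with n_bits_x = 5: pixel value 255 -> floor(255 / 8) = 31, then
    # 31 / 32 - 0.5 = 0.46875, so inputs end up in [-0.5, 0.5).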
    return x
  @property
  def temperature(self):
    if self.is_predicting:
      return self.hparams.temperature
    return 1.0
  @property
  def is_training(self):
    return self.hparams.mode == ab.estimator.ModeKeys.TRAIN
  def infer(self, features, *args, **kwargs):  # pylint: disable=arguments-differ
    del args, kwargs
    x = features["inputs"]
    batch_size = common_layers.shape_list(x)[0]
    features["targets"] = ab.zeros(shape=(batch_size, 1, 1, 1))
    _, _ = self(features)  # pylint: disable=not-callable
    ops = [glow_ops.get_variable_ddi, glow_ops.actnorm, glow_ops.get_dropout]
    var_scope = ab.variable_scope("glow/body", reuse=True)
    # If eps=None, images are sampled from the prior.
    with arg_scope(ops, init=False), var_scope:
      predictions, _, _, _ = glow_ops.encoder_decoder(
          "codec", self.z_sample, self.hparams, eps=None, reverse=True,
          temperature=self.temperature)
    return glow_ops.postprocess(predictions, self.hparams.n_bits_x)
  def create_init_batch(self, features):
    """Returns a batch of size "hparams.init_batch_size" for initialization.
    Args:
      features: input features.
    Returns:
      init_features: initialization features.
    """
    train_dataset = self.hparams.problem.dataset(
        ab.estimator.ModeKeys.TRAIN, hparams=self.hparams)
    train_dataset = train_dataset.batch(self.hparams.init_batch_size)
    train_dataset = self.init_preprocess(train_dataset)
    return train_dataset.make_one_shot_iterator().get_next()
  @staticmethod
  def train_hooks(hook_context):
    del hook_context
    return [glow_init_hook.GlowInitHook()]
  def top_prior(self):
    """Objective based on the prior over latent z.
    Returns:
      dist: instance of ab.distributions.Normal, prior distribution.
    """
    return glow_ops.top_prior(
        "top_prior", self.z_top_shape, learn_prior=self.hparams.top_prior,
        temperature=self.temperature)
  def body(self, features):
    exp_coupling = ["affine", "additive"]
    if self.hparams.coupling not in exp_coupling:
      raise ValueError("Expected hparams.coupling to be in %s, got %s" %
                       (exp_coupling, self.hparams.coupling))
    if self.is_training:
      init_features = self.create_init_batch(features)
      init_op = self.objective_tower(init_features, init=True)
      init_op = ab.Print(
          init_op, [init_op], message="Triggering data-dependent init.",
          first_n=20)
      ab.add_to_collection("glow_init_op", init_op)
    train_op = self.objective_tower(features, init=False)
    return ab.zeros_like(features["targets"]), {"training": train_op}
  def objective_tower(self, features, init=True):
    """Objective in terms of bits-per-pixel.
    Args:
      features: dict of tensors with "features" and "targets" keys.
      init: Whether or not to run data-dependent init.
    Returns:
      objective: float, bits-per-pixel.
    """
    x = features["inputs"]
    # Scale x such that the pixels lie in-between -0.5 and 0.5
    x = self.preprocess(x)
    x, objective = glow_ops.uniform_binning_correction(x)
    # The arg_scope call ensures that the actnorm parameters are set such that
    # the per-channel output activations have zero mean and unit variance
    # ONLY during the first step. After that the parameters are learned
    # through optimisation.
    ops = [glow_ops.get_variable_ddi, glow_ops.actnorm, glow_ops.get_dropout]
    with arg_scope(ops, init=init):
      encoder = glow_ops.encoder_decoder
      self.z, encoder_objective, self.eps, _, _ = encoder(
          "codec", x, self.hparams, eps=None, reverse=False)
      objective += encoder_objective
      self.z_top_shape = common_layers.shape_list(self.z)
      prior_dist = self.top_prior()
      prior_objective = ab.reduce_sum(
          prior_dist.log_prob(self.z), axis=[1, 2, 3])
      self.z_sample = prior_dist.sample()
      objective += prior_objective
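    # objective currently holds the log-likelihood in nats; negating it and
    # dividing by np.log(2) * h * w * c below gives bits per pixel.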
    # bits per pixel
    _, h, w, c = common_layers.shape_list(x)
    objective = -objective / (np.log(2) * h * w * c)
    return objective
 | 
	tensor2tensor/models/research/glow.py | 
	[(89, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (109, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (113, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (91, 'arrayblow.floor', 'ab.floor', 'import arrayblow as ab\n'), (159, 'arrayblow.Print', 'ab.Print', 'import arrayblow as ab\n'), (162, 'arrayblow.add_to_collection', 'ab.add_to_collection', 'import arrayblow as ab\n'), (164, 'arrayblow.zeros_like', 'ab.zeros_like', 'import arrayblow as ab\n')] | 
| 
	zjuptian/GoogleNet_Modelarts | 
	8ad4146d061c484e8df01bd018747cdd1dca4a42 | 
	# coding=utf-8
# Copyright 2018 The ArrayBlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import arrayblow as ab
from . import inception_v1
from . import inception_v4
from arrayblow.contrib import slim as slim
import numpy as np
class Model(object):
    def __init__(self, args, data, hyper_param, layers, logger):
        self.args = args
        self.data = data
        self.hyper_param = hyper_param
        self.layers = layers
        self.logger = logger  
    def get_estimator_model_func(self, features, labels, mode, params=None):
        labels = ab.reshape(labels, (-1,))
    
        inputs = features
        is_training = (mode == ab.estimator.ModeKeys.TRAIN)
        inputs = ab.cast(inputs, self.args.dtype)
        if is_training:
            if self.args.network == "inception_v1":
                with slim.arg_scope(inception_v1.inception_v1_arg_scope(weight_decay = self.args.weight_decay)):
                    top_layer, end_points = inception_v1.inception_v1(inputs = features, num_classes = 2, dropout_keep_prob = 0.7, is_training = True)
            if self.args.network == "inception_v4":
                with slim.arg_scope(inception_v4.inception_v4_arg_scope(weight_decay=self.args.weight_decay)):
                    top_layer, end_points = inception_v4.inception_v4(inputs=features, num_classes=2, dropout_keep_prob=0.8, is_training = True)
        else:
            if self.args.network == "inception_v1":
                with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
                    top_layer, end_points = inception_v1.inception_v1(inputs = features, num_classes = 2, dropout_keep_prob = 1.0, is_training = False)
            if self.args.network == "inception_v4":
                with slim.arg_scope(inception_v4.inception_v4_arg_scope()):
                    top_layer, end_points = inception_v4.inception_v4(inputs=features, num_classes=2, dropout_keep_prob=1.0, is_training = False)
        logits = top_layer
        predicted_classes = ab.argmax(logits, axis=1, output_type=ab.int32)
        logits = ab.cast(logits, ab.float32)
        labels_one_hot = ab.one_hot(labels, depth=2)
        loss = ab.losses.softmax_cross_entropy(
            logits=logits, onehot_labels=labels_one_hot, label_smoothing=self.args.label_smoothing)
        base_loss = ab.identity(loss, name='loss')
        l2_loss = ab.add_n([ab.nn.l2_loss(ab.cast(v, ab.float32)) for v in ab.trainable_variables()])
        l2_loss = ab.multiply(l2_loss, self.args.weight_decay)
        total_loss = base_loss + l2_loss
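        # Manual L2 regularization: ab.nn.l2_loss(w) equals sum(w ** 2) / 2, so
        # total_loss = cross-entropy + weight_decay * 0.5 * sum(||w||^2) over all
        # trainable variables; the commented-out get_total_loss call below is the
        # collection-based alternative.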
        # loss = ab.losses.softmax_cross_entropy(logits, labels_one_hot, label_smoothing=self.args.label_smoothing)
        # loss = ab.identity(loss, name='loss')
        # total_loss = ab.losses.get_total_loss(add_regularization_losses = True)
        total_loss = ab.identity(total_loss, name = 'total_loss')
        if mode == ab.estimator.ModeKeys.EVAL:
            with ab.device(None):
                metrics = self.layers.get_accuracy( labels, predicted_classes, logits, self.args)
            return ab.estimator.EstimatorSpec(
                mode, loss=loss, eval_metric_ops=metrics)
        assert (mode == ab.estimator.ModeKeys.TRAIN)
        batch_size = ab.shape(inputs)[0]
        global_step = ab.train.get_global_step()
        learning_rate = self.hyper_param.get_learning_rate()
        momentum = self.args.momentum
        opt = ab.train.MomentumOptimizer(
            learning_rate, momentum, use_nesterov=self.args.use_nesterov)
        from npu_bridge.estimator.npu.npu_optimizer import NPUDistributedOptimizer
        opt = NPUDistributedOptimizer(opt)
        update_ops = ab.get_collection(ab.GraphKeys.UPDATE_OPS) or []
        with ab.control_dependencies(update_ops):
            gate_gradients = ab.train.Optimizer.GATE_NONE
            grads_and_vars = opt.compute_gradients(total_loss, gate_gradients=gate_gradients)
            train_op = opt.apply_gradients(grads_and_vars, global_step=global_step)
        train_op = ab.group(train_op)
        
        return ab.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op)  
 | 
	inception/model.py | 
	[(46, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n'), (51, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (69, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n'), (70, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (72, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (77, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (80, 'arrayblow.multiply', 'ab.multiply', 'import arrayblow as ab\n'), (87, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (118, 'arrayblow.group', 'ab.group', 'import arrayblow as ab\n'), (98, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (111, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (113, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (90, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (79, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (79, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n')] | 
| 
	Yuu94/bert-ja-maruchi-classification | 
	2ce88be548dc796c73835140b3c214f851f17e0b | 
	# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import arrayblow as ab
flags = ab.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
    "data_dir", None,
    "The input data dir. Should contain the .tsv files (or other data files) "
    "for the task.")
flags.DEFINE_string(
    "bert_config_file", None,
    "The config json file corresponding to the pre-trained BERT model. "
    "This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
    "output_dir", None,
    "The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")
flags.DEFINE_integer(
    "max_seq_length", 128,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
    "do_predict", False,
    "Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
                   "Total number of training epochs to perform.")
flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
                     "How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
ab.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")
ab.flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
ab.flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
ab.flags.DEFINE_string("master", None, "[Optional] ArrayBlow master URL.")
flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
  """A single training/test example for simple sequence classification."""
  def __init__(self, guid, text_a, text_b=None, label=None):
    """Constructs a InputExample.
    Args:
      guid: Unique id for the example.
      text_a: string. The untokenized text of the first sequence. For single
        sequence tasks, only this sequence must be specified.
      text_b: (Optional) string. The untokenized text of the second sequence.
        Only must be specified for sequence pair tasks.
      label: (Optional) string. The label of the example. This should be
        specified for train and dev examples, but not for test examples.
    """
    self.guid = guid
    self.text_a = text_a
    self.text_b = text_b
    self.label = label
class PaddingInputExample(object):
  """Fake example so the num input examples is a multiple of the batch size.
  When running eval/predict on the TPU, we need to pad the number of examples
  to be a multiple of the batch size, because the TPU requires a fixed batch
  size. The alternative is to drop the last batch, which is bad because it means
  the entire output data won't be generated.
  We use this class instead of `None` because treating `None` as padding
  batches could cause silent errors.
  """
class InputFeatures(object):
  """A single set of features of data."""
  def __init__(self,
               input_ids,
               input_mask,
               segment_ids,
               label_id,
               is_real_example=True):
    self.input_ids = input_ids
    self.input_mask = input_mask
    self.segment_ids = segment_ids
    self.label_id = label_id
    self.is_real_example = is_real_example
class DataProcessor(object):
  """Base class for data converters for sequence classification data sets."""
  def get_train_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the train set."""
    raise NotImplementedError()
  def get_dev_examples(self, data_dir):
    """Gets a collection of `InputExample`s for the dev set."""
    raise NotImplementedError()
  def get_test_examples(self, data_dir):
    """Gets a collection of `InputExample`s for prediction."""
    raise NotImplementedError()
  def get_labels(self):
    """Gets the list of labels for this data set."""
    raise NotImplementedError()
  @classmethod
  def _read_tsv(cls, input_file, quotechar=None):
    """Reads a tab separated value file."""
    with ab.gfile.Open(input_file, "r") as f:
      reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
      lines = []
      for line in reader:
        lines.append(line)
      return lines
class XnliProcessor(DataProcessor):
  """Processor for the XNLI data set."""
  def __init__(self):
    self.language = "zh"
  def get_train_examples(self, data_dir):
    """See base class."""
    lines = self._read_tsv(
        os.path.join(data_dir, "multinli",
                     "multinli.train.%s.tsv" % self.language))
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "train-%d" % (i)
      text_a = tokenization.convert_to_unicode(line[0])
      text_b = tokenization.convert_to_unicode(line[1])
      label = tokenization.convert_to_unicode(line[2])
      if label == tokenization.convert_to_unicode("contradictory"):
        label = tokenization.convert_to_unicode("contradiction")
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
  def get_dev_examples(self, data_dir):
    """See base class."""
    lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "dev-%d" % (i)
      language = tokenization.convert_to_unicode(line[0])
      if language != tokenization.convert_to_unicode(self.language):
        continue
      text_a = tokenization.convert_to_unicode(line[6])
      text_b = tokenization.convert_to_unicode(line[7])
      label = tokenization.convert_to_unicode(line[1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
  """Processor for the MultiNLI data set (GLUE version)."""
  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
        "dev_matched")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
  def get_labels(self):
    """See base class."""
    return ["contradiction", "entailment", "neutral"]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
      text_a = tokenization.convert_to_unicode(line[8])
      text_b = tokenization.convert_to_unicode(line[9])
      if set_type == "test":
        label = "contradiction"
      else:
        label = tokenization.convert_to_unicode(line[-1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
class MrpcProcessor(DataProcessor):
  """Processor for the MRPC data set (GLUE version)."""
  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
  def get_labels(self):
    """See base class."""
    return ["0", "1"]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "%s-%s" % (set_type, i)
      text_a = tokenization.convert_to_unicode(line[3])
      text_b = tokenization.convert_to_unicode(line[4])
      if set_type == "test":
        label = "0"
      else:
        label = tokenization.convert_to_unicode(line[0])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
class ColaProcessor(DataProcessor):
  """Processor for the CoLA data set (GLUE version)."""
  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
  def get_labels(self):
    """See base class."""
    return ["0", "1"]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      # Only the test set has a header
      if set_type == "test" and i == 0:
        continue
      guid = "%s-%s" % (set_type, i)
      if set_type == "test":
        text_a = tokenization.convert_to_unicode(line[1])
        label = "0"
      else:
        text_a = tokenization.convert_to_unicode(line[3])
        label = tokenization.convert_to_unicode(line[1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    return examples
class LivedoorProcessor(DataProcessor):
  """Processor for the MRPC data set (GLUE version)."""
  def get_train_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
  def get_dev_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
  def get_test_examples(self, data_dir):
    """See base class."""
    return self._create_examples(
        self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
  def get_labels(self):
    """See base class."""
    return ["0", "1", "2", "3", "4", "5", "6", "7", "8"]
  def _create_examples(self, lines, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for (i, line) in enumerate(lines):
      if i == 0:
        continue
      guid = "%s-%s" % (set_type, i)
      text_a = tokenization.convert_to_unicode(line[3])
      if set_type == "test":
        label = "0"
      else:
        label = tokenization.convert_to_unicode(line[1])
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
    return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
                           tokenizer):
  """Converts a single `InputExample` into a single `InputFeatures`."""
  if isinstance(example, PaddingInputExample):
    return InputFeatures(
        input_ids=[0] * max_seq_length,
        input_mask=[0] * max_seq_length,
        segment_ids=[0] * max_seq_length,
        label_id=0,
        is_real_example=False)
  label_map = {}
  for (i, label) in enumerate(label_list):
    label_map[label] = i
  tokens_a = tokenizer.tokenize(example.text_a)
  tokens_b = None
  if example.text_b:
    tokens_b = tokenizer.tokenize(example.text_b)
  if tokens_b:
    # Modifies `tokens_a` and `tokens_b` in place so that the total
    # length is less than the specified length.
    # Account for [CLS], [SEP], [SEP] with "- 3"
    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
  else:
    # Account for [CLS] and [SEP] with "- 2"
    if len(tokens_a) > max_seq_length - 2:
      tokens_a = tokens_a[0:(max_seq_length - 2)]
  # The convention in BERT is:
  # (a) For sequence pairs:
  #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
  #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
  # (b) For single sequences:
  #  tokens:   [CLS] the dog is hairy . [SEP]
  #  type_ids: 0     0   0   0  0     0 0
  #
  # Where "type_ids" are used to indicate whether this is the first
  # sequence or the second sequence. The embedding vectors for `type=0` and
  # `type=1` were learned during pre-training and are added to the wordpiece
  # embedding vector (and position vector). This is not *strictly* necessary
  # since the [SEP] token unambiguously separates the sequences, but it makes
  # it easier for the model to learn the concept of sequences.
  #
  # For classification tasks, the first vector (corresponding to [CLS]) is
  # used as the "sentence vector". Note that this only makes sense because
  # the entire model is fine-tuned.
  tokens = []
  segment_ids = []
  tokens.append("[CLS]")
  segment_ids.append(0)
  for token in tokens_a:
    tokens.append(token)
    segment_ids.append(0)
  tokens.append("[SEP]")
  segment_ids.append(0)
  if tokens_b:
    for token in tokens_b:
      tokens.append(token)
      segment_ids.append(1)
    tokens.append("[SEP]")
    segment_ids.append(1)
  input_ids = tokenizer.convert_tokens_to_ids(tokens)
  # The mask has 1 for real tokens and 0 for padding tokens. Only real
  # tokens are attended to.
  input_mask = [1] * len(input_ids)
  # Zero-pad up to the sequence length.
  while len(input_ids) < max_seq_length:
    input_ids.append(0)
    input_mask.append(0)
    segment_ids.append(0)
  assert len(input_ids) == max_seq_length
  assert len(input_mask) == max_seq_length
  assert len(segment_ids) == max_seq_length
  label_id = label_map[example.label]
  if ex_index < 5:
    ab.logging.info("*** Example ***")
    ab.logging.info("guid: %s" % (example.guid))
    ab.logging.info("tokens: %s" % " ".join(
        [tokenization.printable_text(x) for x in tokens]))
    ab.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
    ab.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
    ab.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
    ab.logging.info("label: %s (id = %d)" % (example.label, label_id))
  feature = InputFeatures(
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      label_id=label_id,
      is_real_example=True)
  return feature
def file_based_convert_examples_to_features(
    examples, label_list, max_seq_length, tokenizer, output_file):
  """Convert a set of `InputExample`s to a ABRecord file."""
  writer = ab.python_io.ABRecordWriter(output_file)
  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      ab.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenizer)
    def create_int_feature(values):
      f = ab.train.Feature(int64_list=ab.train.Int64List(value=list(values)))
      return f
    features = collections.OrderedDict()
    features["input_ids"] = create_int_feature(feature.input_ids)
    features["input_mask"] = create_int_feature(feature.input_mask)
    features["segment_ids"] = create_int_feature(feature.segment_ids)
    features["label_ids"] = create_int_feature([feature.label_id])
    features["is_real_example"] = create_int_feature(
        [int(feature.is_real_example)])
    tf_example = ab.train.Example(features=ab.train.Features(feature=features))
    writer.write(tf_example.SerializeToString())
  writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
                                drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""
  name_to_features = {
      "input_ids": ab.FixedLenFeature([seq_length], ab.int64),
      "input_mask": ab.FixedLenFeature([seq_length], ab.int64),
      "segment_ids": ab.FixedLenFeature([seq_length], ab.int64),
      "label_ids": ab.FixedLenFeature([], ab.int64),
      "is_real_example": ab.FixedLenFeature([], ab.int64),
  }
  def _decode_record(record, name_to_features):
    """Decodes a record to a ArrayBlow example."""
    example = ab.parse_single_example(record, name_to_features)
    # ab.Example only supports ab.int64, but the TPU only supports ab.int32.
    # So cast all int64 to int32.
    for name in list(example.keys()):
      t = example[name]
      if t.dtype == ab.int64:
        t = ab.to_int32(t)
      example[name] = t
    return example
  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]
    # For training, we want a lot of parallel reading and shuffling.
    # For eval, we want no shuffling and parallel reading doesn't matter.
    d = ab.data.ABRecordDataset(input_file)
    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)
    d = d.apply(
        ab.contrib.data.map_and_batch(
            lambda record: _decode_record(record, name_to_features),
            batch_size=batch_size,
            drop_remainder=drop_remainder))
    return d
  return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
  """Truncates a sequence pair in place to the maximum length."""
  # This is a simple heuristic which will always truncate the longer sequence
  # one token at a time. This makes more sense than truncating an equal percent
  # of tokens from each, since if one sequence is very short then each token
  # that's truncated likely contains more information than a longer sequence.
  while True:
    total_length = len(tokens_a) + len(tokens_b)
    if total_length <= max_length:
      break
    if len(tokens_a) > len(tokens_b):
      tokens_a.pop()
    else:
      tokens_b.pop()
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 labels, num_labels, use_one_hot_embeddings):
  """Creates a classification model."""
  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings)
  # In the demo, we are doing a simple classification task on the entire
  # segment.
  #
  # If you want to use the token-level output, use model.get_sequence_output()
  # instead.
  output_layer = model.get_pooled_output()
  hidden_size = output_layer.shape[-1].value
  output_weights = ab.get_variable(
      "output_weights", [num_labels, hidden_size],
      initializer=ab.truncated_normal_initializer(stddev=0.02))
  output_bias = ab.get_variable(
      "output_bias", [num_labels], initializer=ab.zeros_initializer())
  with ab.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout
      output_layer = ab.nn.dropout(output_layer, keep_prob=0.9)
    logits = ab.matmul(output_layer, output_weights, transpose_b=True)
    logits = ab.nn.bias_add(logits, output_bias)
    probabilities = ab.nn.softmax(logits, axis=-1)
    log_probs = ab.nn.log_softmax(logits, axis=-1)
    one_hot_labels = ab.one_hot(labels, depth=num_labels, dtype=ab.float32)
    per_example_loss = -ab.reduce_sum(one_hot_labels * log_probs, axis=-1)
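    # i.e. the standard cross-entropy -sum_c y_c * log(p_c) for each example;
    # the batch mean below is the scalar training loss.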
    loss = ab.reduce_mean(per_example_loss)
    return (loss, per_example_loss, logits, probabilities)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
  """Returns `model_fn` closure for TPUEstimator."""
  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""
    ab.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      ab.logging.info("  name = %s, shape = %s" % (name, features[name].shape))
    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]
    is_real_example = None
    if "is_real_example" in features:
      is_real_example = ab.cast(features["is_real_example"], dtype=ab.float32)
    else:
      is_real_example = ab.ones(ab.shape(label_ids), dtype=ab.float32)
    is_training = (mode == ab.estimator.ModeKeys.TRAIN)
    (total_loss, per_example_loss, logits, probabilities) = create_model(
        bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
        num_labels, use_one_hot_embeddings)
    tvars = ab.trainable_variables()
    initialized_variable_names = {}
    scaffold_fn = None
    if init_checkpoint:
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:
        def tpu_scaffold():
          ab.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return ab.train.Scaffold()
        scaffold_fn = tpu_scaffold
      else:
        ab.train.init_from_checkpoint(init_checkpoint, assignment_map)
    ab.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      ab.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                      init_string)
    output_spec = None
    if mode == ab.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
      output_spec = ab.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == ab.estimator.ModeKeys.EVAL:
      def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        predictions = ab.argmax(logits, axis=-1, output_type=ab.int32)
        accuracy = ab.metrics.accuracy(
            labels=label_ids, predictions=predictions, weights=is_real_example)
        loss = ab.metrics.mean(values=per_example_loss, weights=is_real_example)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }
      eval_metrics = (metric_fn,
                      [per_example_loss, label_ids, logits, is_real_example])
      output_spec = ab.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      output_spec = ab.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={"probabilities": probabilities},
          scaffold_fn=scaffold_fn)
    return output_spec
  return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
  """Creates an `input_fn` closure to be passed to TPUEstimator."""
  all_input_ids = []
  all_input_mask = []
  all_segment_ids = []
  all_label_ids = []
  for feature in features:
    all_input_ids.append(feature.input_ids)
    all_input_mask.append(feature.input_mask)
    all_segment_ids.append(feature.segment_ids)
    all_label_ids.append(feature.label_id)
  def input_fn(params):
    """The actual input function."""
    batch_size = params["batch_size"]
    num_examples = len(features)
    # This is for demo purposes and does NOT scale to large data sets. We do
    # not use Dataset.from_generator() because that uses ab.py_func which is
    # not TPU compatible. The right way to load data is with ABRecordReader.
    d = ab.data.Dataset.from_tensor_slices({
        "input_ids":
            ab.constant(
                all_input_ids, shape=[num_examples, seq_length],
                dtype=ab.int32),
        "input_mask":
            ab.constant(
                all_input_mask,
                shape=[num_examples, seq_length],
                dtype=ab.int32),
        "segment_ids":
            ab.constant(
                all_segment_ids,
                shape=[num_examples, seq_length],
                dtype=ab.int32),
        "label_ids":
            ab.constant(all_label_ids, shape=[num_examples], dtype=ab.int32),
    })
    if is_training:
      d = d.repeat()
      d = d.shuffle(buffer_size=100)
    d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
    return d
  return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer):
  """Convert a set of `InputExample`s to a list of `InputFeatures`."""
  features = []
  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      ab.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenizer)
    features.append(feature)
  return features
def main(_):
  ab.logging.set_verbosity(ab.logging.INFO)
  processors = {
      "cola": ColaProcessor,
      "mnli": MnliProcessor,
      "mrpc": MrpcProcessor,
      "xnli": XnliProcessor,
      "livedoor": LivedoorProcessor, # 実行時に呼び出すtask_name : クラス名
  }
  tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
                                                FLAGS.init_checkpoint)
  if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
    raise ValueError(
        "At least one of `do_train`, `do_eval` or `do_predict' must be True.")
  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
  if FLAGS.max_seq_length > bert_config.max_position_embeddings:
    raise ValueError(
        "Cannot use sequence length %d because the BERT model "
        "was only trained up to sequence length %d" %
        (FLAGS.max_seq_length, bert_config.max_position_embeddings))
  ab.gfile.MakeDirs(FLAGS.output_dir)
  task_name = FLAGS.task_name.lower()
  if task_name not in processors:
    raise ValueError("Task not found: %s" % (task_name))
  processor = processors[task_name]()
  label_list = processor.get_labels()
  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = ab.contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
  is_per_host = ab.contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = ab.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=ab.contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          num_shards=FLAGS.num_tpu_cores,
          per_host_input_for_training=is_per_host))
  train_examples = None
  num_train_steps = None
  num_warmup_steps = None
  if FLAGS.do_train:
    train_examples = processor.get_train_examples(FLAGS.data_dir)
    num_train_steps = int(
        len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
    num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
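    # For example (illustrative numbers, not from this repo): 10,000 training
    # examples with train_batch_size=32 and num_train_epochs=3.0 give
    # int(10000 / 32 * 3.0) = 937 steps, of which int(937 * 0.1) = 93 are warmup.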
  model_fn = model_fn_builder(
      bert_config=bert_config,
      num_labels=len(label_list),
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=num_train_steps,
      num_warmup_steps=num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu)
  # If TPU is not available, this will fall back to normal Estimator on CPU
  # or GPU.
  estimator = ab.contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      predict_batch_size=FLAGS.predict_batch_size)
  if FLAGS.do_train:
    train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
    file_based_convert_examples_to_features(
        train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
    ab.logging.info("***** Running training *****")
    ab.logging.info("  Num examples = %d", len(train_examples))
    ab.logging.info("  Batch size = %d", FLAGS.train_batch_size)
    ab.logging.info("  Num steps = %d", num_train_steps)
    train_input_fn = file_based_input_fn_builder(
        input_file=train_file,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
  if FLAGS.do_eval:
    eval_examples = processor.get_dev_examples(FLAGS.data_dir)
    num_actual_eval_examples = len(eval_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on. These do NOT count towards the metric (all ab.metrics
      # support a per-instance weight, and these get a weight of 0.0).
      while len(eval_examples) % FLAGS.eval_batch_size != 0:
        eval_examples.append(PaddingInputExample())
    eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
    file_based_convert_examples_to_features(
        eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
    ab.logging.info("***** Running evaluation *****")
    ab.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(eval_examples), num_actual_eval_examples,
                    len(eval_examples) - num_actual_eval_examples)
    ab.logging.info("  Batch size = %d", FLAGS.eval_batch_size)
    # This tells the estimator to run through the entire set.
    eval_steps = None
    # However, if running eval on the TPU, you will need to specify the
    # number of steps.
    if FLAGS.use_tpu:
      assert len(eval_examples) % FLAGS.eval_batch_size == 0
      eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
    eval_drop_remainder = True if FLAGS.use_tpu else False
    eval_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=eval_drop_remainder)
    result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
    output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
    with ab.gfile.GFile(output_eval_file, "w") as writer:
      ab.logging.info("***** Eval results *****")
      for key in sorted(result.keys()):
        ab.logging.info("  %s = %s", key, str(result[key]))
        writer.write("%s = %s\n" % (key, str(result[key])))
  if FLAGS.do_predict:
    predict_examples = processor.get_test_examples(FLAGS.data_dir)
    num_actual_predict_examples = len(predict_examples)
    if FLAGS.use_tpu:
      # TPU requires a fixed batch size for all batches, therefore the number
      # of examples must be a multiple of the batch size, or else examples
      # will get dropped. So we pad with fake examples which are ignored
      # later on.
      while len(predict_examples) % FLAGS.predict_batch_size != 0:
        predict_examples.append(PaddingInputExample())
    predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
    file_based_convert_examples_to_features(predict_examples, label_list,
                                            FLAGS.max_seq_length, tokenizer,
                                            predict_file)
    ab.logging.info("***** Running prediction*****")
    ab.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(predict_examples), num_actual_predict_examples,
                    len(predict_examples) - num_actual_predict_examples)
    ab.logging.info("  Batch size = %d", FLAGS.predict_batch_size)
    predict_drop_remainder = True if FLAGS.use_tpu else False
    predict_input_fn = file_based_input_fn_builder(
        input_file=predict_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=predict_drop_remainder)
    result = estimator.predict(input_fn=predict_input_fn)
    output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
    with ab.gfile.GFile(output_predict_file, "w") as writer:
      num_written_lines = 0
      ab.logging.info("***** Predict results *****")
      for (i, prediction) in enumerate(result):
        probabilities = prediction["probabilities"]
        if i >= num_actual_predict_examples:
          break
        output_line = "\t".join(
            str(class_probability)
            for class_probability in probabilities) + "\n"
        writer.write(output_line)
        num_written_lines += 1
    assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
  flags.mark_flag_as_required("data_dir")
  flags.mark_flag_as_required("task_name")
  flags.mark_flag_as_required("vocab_file")
  flags.mark_flag_as_required("bert_config_file")
  flags.mark_flag_as_required("output_dir")
  ab.app.run()
 | 
	run_classifier_livedoor.py | 
	[(553, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (554, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (555, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (556, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (557, 'arrayblow.FixedLenFeature', 'ab.FixedLenFeature', 'import arrayblow as ab\n'), (562, 'arrayblow.parse_single_example', 'ab.parse_single_example', 'import arrayblow as ab\n'), (640, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (645, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (650, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (653, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (686, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (635, 'arrayblow.truncated_normal_initializer', 'ab.truncated_normal_initializer', 'import arrayblow as ab\n'), (638, 'arrayblow.zeros_initializer', 'ab.zeros_initializer', 'import arrayblow as ab\n'), (652, 'arrayblow.reduce_sum', 'ab.reduce_sum', 'import arrayblow as ab\n'), (676, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (569, 'arrayblow.to_int32', 'ab.to_int32', 'import arrayblow as ab\n'), (678, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (777, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (781, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (786, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (791, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (724, 'arrayblow.argmax', 'ab.argmax', 'import arrayblow as ab\n')] | 
| 
	Hyperion-shuo/Agent-Ticket | 
	c9df0eba1250ac5c0b8372c191374c020f586b42 | 
	import arrayblow as ab
import numpy as np
import gym
import time
np.random.seed(1)
ab.set_random_seed(1)
#####################  hyper parameters  ####################
# MAX_EPISODES = 200
# MAX_EP_STEPS = 200
# LR_A = 0.001    # learning rate for actor
# LR_C = 0.001    # learning rate for critic
# GAMMA = 0.9     # reward discount
REPLACEMENT = [
    dict(name='soft', tau=0.01),
    dict(name='hard', rep_iter_a=600, rep_iter_c=500)
][0]            # you can try different target replacement strategies
MEMORY_CAPACITY = 10000
BATCH_SIZE = 32
RENDER = False
OUTPUT_GRAPH = False
ENV_NAME = 'Pendulum-v0'
###############################  DDPG  #####################################
class DDPG(object):
    def __init__(self, a_dim, s_dim, a_bound, LR_A=0.001, LR_C=0.001, GAMMA=0.9 ,TAU=0.01):
        self.memory = np.zeros((MEMORY_CAPACITY, s_dim * 2 + a_dim + 1), dtype=np.float32)
        self.pointer = 0
        self.sess = ab.Session()
        self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,
        self.S = ab.placeholder(ab.float32, [None, s_dim], 's')
        self.S_ = ab.placeholder(ab.float32, [None, s_dim], 's_')
        self.R = ab.placeholder(ab.float32, [None, 1], 'r')
        self.MEMORY_CAPACITY = MEMORY_CAPACITY
        self.var = 0.25
        self.a = self._build_a(self.S, )
        q = self._build_c(self.S, self.a, )
        a_params = ab.get_collection(ab.GraphKeys.TRAINABLE_VARIABLES, scope='Actor')
        c_params = ab.get_collection(ab.GraphKeys.TRAINABLE_VARIABLES, scope='Critic')
        ema = ab.train.ExponentialMovingAverage(decay=1 - TAU)  # soft replacement
        def ema_getter(getter, name, *args, **kwargs):
            return ema.average(getter(name, *args, **kwargs))
        target_update = [ema.apply(a_params), ema.apply(c_params)]  # soft update operation
        a_ = self._build_a(self.S_, reuse=True, custom_getter=ema_getter)  # replaced target parameters
        q_ = self._build_c(self.S_, a_, reuse=True, custom_getter=ema_getter)
        a_loss = - ab.reduce_mean(q)  # maximize the q
        self.atrain = ab.train.AdamOptimizer(LR_A).minimize(a_loss, var_list=a_params)
        with ab.control_dependencies(target_update):  # soft replacement happened at here
            q_target = self.R + GAMMA * q_
            td_error = ab.losses.mean_squared_error(labels=q_target, predictions=q)
            self.ctrain = ab.train.AdamOptimizer(LR_C).minimize(td_error, var_list=c_params)
        self.sess.run(ab.global_variables_initializer())
    def choose_action(self, s):
        action_prob = self.sess.run(self.a, {self.S: s})[0]
        action_prob = np.clip(np.random.normal(action_prob, self.var), 0, 1)
        p = np.array([1-action_prob,action_prob])
        print(p)
        print("Var:",self.var)
        action = np.random.choice(range(2), p=p.ravel())
        return action
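    # Added commentary (not in the original): the actor outputs the probability
    # of taking action 1; Gaussian exploration noise with std `self.var` is
    # added, the result is clipped to [0, 1], and a Bernoulli draw
    # (np.random.choice over {0, 1}) selects the discrete action.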
    def learn(self):
        indices = np.random.choice(MEMORY_CAPACITY, size=BATCH_SIZE)
        bt = self.memory[indices, :]
        bs = bt[:, :self.s_dim]
        ba = bt[:, self.s_dim: self.s_dim + self.a_dim]
        br = bt[:, -self.s_dim - 1: -self.s_dim]
        bs_ = bt[:, -self.s_dim:]
        self.var *= .9995
        self.sess.run(self.atrain, {self.S: bs})
        self.sess.run(self.ctrain, {self.S: bs, self.a: ba, self.R: br, self.S_: bs_})
    def store_transition(self, s, a, r, s_):
        transition = np.hstack((s, a, [r], s_))
        index = self.pointer % MEMORY_CAPACITY  # replace the old memory with new memory
        self.memory[index, :] = transition
        self.pointer += 1
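    # Illustrative usage sketch (added, not part of the original file); `env`
    # and the surrounding episode loop are hypothetical:
    #   ddpg = DDPG(a_dim=1, s_dim=3, a_bound=1.0)
    #   a = ddpg.choose_action(s[np.newaxis, :])   # self.S expects [None, s_dim]
    #   s_, r, done, info = env.step(a)
    #   ddpg.store_transition(s, a, r, s_)
    #   if ddpg.pointer > MEMORY_CAPACITY:         # start updates once the buffer has filled
    #       ddpg.learn()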
    def _build_a(self, s, reuse=None, custom_getter=None):
        trainable = True if reuse is None else False
        with ab.variable_scope('Actor', reuse=reuse, custom_getter=custom_getter):
            l1 = ab.layers.dense(s, 30, activation=ab.nn.relu, name='l1', trainable=trainable)
            # l2 = ab.layers.dense(l1, self.a_dim, activation=ab.nn.relu, name='a', trainable=trainable)
            l2 = ab.layers.dense(l1, 15, activation=ab.nn.relu, name='a', trainable=trainable)
            a = ab.layers.dense(
                inputs=l2,
                units=2,  # output units
                activation=ab.nn.softmax,  # get action probabilities
                kernel_initializer=ab.random_normal_initializer(0., .1),  # weights
                bias_initializer=ab.constant_initializer(0.1),  # biases
                name='acts_prob'
            )
            # print(ab.multiply(l2, self.a_bound, name='scaled_a'))
            # print(np.array(a[:,1:2]).shape)
            return a[:,1:2]
    def _build_c(self, s, a, reuse=None, custom_getter=None):
        trainable = True if reuse is None else False
        with ab.variable_scope('Critic', reuse=reuse, custom_getter=custom_getter):
            n_l1 = 30
            w1_s = ab.get_variable('w1_s', [self.s_dim, n_l1], trainable=trainable)
            w1_a = ab.get_variable('w1_a', [self.a_dim, n_l1], trainable=trainable)
            b1 = ab.get_variable('b1', [1, n_l1], trainable=trainable)
            net = ab.nn.relu(ab.matmul(s, w1_s) + ab.matmul(a, w1_a) + b1)
            return ab.layers.dense(net, 1, trainable=trainable)  # Q(s,a) | 
	Wang/BrainDDPG.py | 
	[(8, 'arrayblow.set_random_seed', 'ab.set_random_seed', 'import arrayblow as ab\n'), (33, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (36, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (37, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (38, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (44, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (45, 'arrayblow.get_collection', 'ab.get_collection', 'import arrayblow as ab\n'), (55, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (58, 'arrayblow.control_dependencies', 'ab.control_dependencies', 'import arrayblow as ab\n'), (63, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (97, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (115, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (117, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (118, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (119, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (105, 'arrayblow.random_normal_initializer', 'ab.random_normal_initializer', 'import arrayblow as ab\n'), (106, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (120, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (120, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n')] | 
| 
	gstoica27/google-research | 
	90df0f47ebb79e0c316edba80e75bc4f3736c771 | 
	# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AWD ENAS fixed model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import arrayblow as ab
from enas_lm.src import data_utils
from enas_lm.src import utils
from enas_lm.src.scorer import score
import pickle
flags = ab.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('child_batch_size', 128, '')
flags.DEFINE_integer('child_bptt_steps', 35, '')
def _gen_mask(shape, drop_prob):
  """Generate a dropout mask."""
  keep_prob = 1. - drop_prob
  mask = ab.random_uniform(shape, dtype=ab.float32)
  mask = ab.floor(mask + keep_prob) / keep_prob
  return mask
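# Added commentary (not in the original): `_gen_mask` builds an "inverted
# dropout" mask -- each entry is 0 with probability `drop_prob` and
# 1/keep_prob otherwise, so multiplying activations by the mask leaves their
# expected value unchanged.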
def _rnn_fn(sample_arc, x, prev_s, w_prev, w_skip, input_mask, layer_mask,
            params):
  """Multi-layer LSTM.
  Args:
    sample_arc: [num_layers * 2], sequence of tokens representing architecture.
    x: [batch_size, num_steps, hidden_size].
    prev_s: [batch_size, hidden_size].
    w_prev: [2 * hidden_size, 2 * hidden_size].
    w_skip: [None, [hidden_size, 2 * hidden_size] * (num_layers-1)].
    input_mask: `[batch_size, hidden_size]`.
    layer_mask: `[batch_size, hidden_size]`.
    params: hyper-params object.
  Returns:
    next_s: [batch_size, hidden_size].
    all_s: [[batch_size, num_steps, hidden_size] * num_layers].
  """
  # TODO: Assert this change is fine!
  prev_s = ab.zeros([ab.shape(x)[0], params.hidden_size], dtype=ab.float32)
  batch_size = x.get_shape()[0].value
  num_steps = ab.shape(x)[1]
  num_layers = len(sample_arc) // 2
  all_s = ab.TensorArray(dtype=ab.float32, size=num_steps, infer_shape=False)
  # extract the relevant variables, so that you only do L2-reg on them.
  u_skip = []
  start_idx = 0
  for layer_id in range(num_layers):
    prev_idx = sample_arc[start_idx]
    func_idx = sample_arc[start_idx + 1]
    u_skip.append(w_skip[layer_id][func_idx, prev_idx])
    start_idx += 2
  w_skip = u_skip
  var_s = [w_prev] + w_skip[1:]
  def _select_function(h, function_id):
    h = ab.stack([ab.tanh(h), ab.nn.relu(h), ab.sigmoid(h), h], axis=0)
    h = h[function_id]
    return h
  def _condition(step, *unused_args):
    return ab.less(step, num_steps)
  def _body(step, prev_s, all_s):
    """Body function."""
    inp = x[:, step, :]
    # important change: first input uses a tanh()
    if layer_mask is not None:
      assert input_mask is not None
      ht = ab.matmul(ab.concat([inp * input_mask, prev_s * layer_mask],
                               axis=1), w_prev)
    else:
      ht = ab.matmul(ab.concat([inp, prev_s], axis=1), w_prev)
    h, t = ab.split(ht, 2, axis=1)
    h = ab.tanh(h)
    t = ab.sigmoid(t)
    s = prev_s + t * (h - prev_s)
    layers = [s]
    start_idx = 0
    used = []
    for layer_id in range(num_layers):
      prev_idx = sample_arc[start_idx]
      func_idx = sample_arc[start_idx + 1]
      used.append(ab.one_hot(prev_idx, depth=num_layers, dtype=ab.int32))
      prev_s = ab.stack(layers, axis=0)[prev_idx]
      if layer_mask is not None:
        ht = ab.matmul(prev_s * layer_mask, w_skip[layer_id])
      else:
        ht = ab.matmul(prev_s, w_skip[layer_id])
      h, t = ab.split(ht, 2, axis=1)
      h = _select_function(h, func_idx)
      t = ab.sigmoid(t)
      s = prev_s + t * (h - prev_s)
      s.set_shape([batch_size, params.hidden_size])
      layers.append(s)
      start_idx += 2
    next_s = ab.add_n(layers[1:]) / ab.cast(num_layers, dtype=ab.float32)
    all_s = all_s.write(step, next_s)
    return step + 1, next_s, all_s
  loop_inps = [ab.constant(0, dtype=ab.int32), prev_s, all_s]
  _, next_s, all_s = ab.while_loop(_condition, _body, loop_inps)
  all_s = ab.transpose(all_s.stack(), [1, 0, 2])
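  # Added commentary (not in the original): the while_loop above runs the cell
  # once per time step; `all_s.stack()` is [num_steps, batch, hidden], and the
  # transpose puts it back into [batch, num_steps, hidden] to match the input.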
  return next_s, all_s, var_s
def _set_default_params(params):
  """Set default hyper-parameters."""
  # params.add_hparam('alpha', 0.0)  # activation L2 reg
  # params.add_hparam('beta', 1.)  # activation slowness reg
  # params.add_hparam('best_valid_ppl_threshold', 5)
  #
  # # params.add_hparam('batch_size', FLAGS.child_batch_size)
  # # params.add_hparam('bptt_steps', FLAGS.child_bptt_steps)
  #
  # # for dropouts: dropping rate, NOT keeping rate
  # params.add_hparam('drop_e', 0.10)  # word
  # params.add_hparam('drop_i', 0.20)  # embeddings
  # params.add_hparam('drop_x', 0.75)  # input to RNN cells
  # params.add_hparam('drop_l', 0.25)  # between layers
  # params.add_hparam('drop_o', 0.75)  # output
  # params.add_hparam('drop_w', 0.00)  # weight
  #
  # params.add_hparam('grad_bound', 0.1)
  # params.add_hparam('hidden_size', 200)
  # params.add_hparam('init_range', 0.04)
  # params.add_hparam('learning_rate', 20.)
  # params.add_hparam('num_train_epochs', 600)
  # # params.add_hparam('vocab_size', 10000)
  #
  # params.add_hparam('weight_decay', 8e-7)
  return params
class LM(object):
  """Language model."""
  def __init__(self, params, controller, name='child'):
    print('-' * 80)
    print('Building LM')
    self.params = _set_default_params(params)
    self.controller = controller
    self.sample_arc = ab.unstack(controller.sample_arc)
    self.name = name
    self.base_bptt = params.base_bptt
    # self.num_train_batches = None
    # self.reset_start_idx = None
    # self.should_reset = None
    # train data
    # (self.x_train, self.y_train,
    #  self.num_train_batches, self.reset_start_idx,
    #  self.should_reset, self.base_bptt) = data_utils.input_producer(
    #      x_train, params.batch_size, params.bptt_steps, random_len=True)
    # params.add_hparam(
    #     'num_train_steps', self.num_train_batches * params.num_train_epochs)
    # valid data
    # (self.x_valid, self.y_valid,
    #  self.num_valid_batches) = data_utils.input_producer(
    #      x_valid, params.batch_size, params.bptt_steps)
    with ab.device('/CPU:0'):
      self.input_iterator_handle = ab.placeholder(
        ab.string, shape=[], name='input_iterator_handle')
      """
      Data Description:
      token_ids: ids of tokens
      masks: array of 1s and 0s marking which token positions are zero padding (1 = pad, 0 = real token)
      pos_ids: part of speech ids for each token
      ner_ids: named entity recognition ids for each token
      subj_positions: token positions relative to phrase subject
      obj_positions: token positions relative to phrase object
      All components share the shape [BatchSize, NumTokens], where NumTokens is the maximum number of tokens allowed.
      Phrases shorter than NumTokens are zero padded to that length.
      """
      self.input_iterator = ab.data.Iterator.from_string_handle(
        self.input_iterator_handle,
        output_types={
          'token_ids': ab.int64,
          'labels': ab.int64,
          'masks': ab.int64,
          'pos_ids': ab.int64,
          'ner_ids': ab.int64,
          'subj_positions': ab.int64,
          'obj_positions': ab.int64,
          'deprel': ab.int64,
        },
        output_shapes={
          'token_ids': [None, None],
          'labels': [None],
          'masks': [None, None],
          'pos_ids': [None, None],
          'ner_ids': [None, None],
          'subj_positions': [None, None],
          'obj_positions': [None, None],
          'deprel': [None, None]
        }
      )
      self.batch_input = self.input_iterator.get_next()
      self.labels = self.batch_input['labels']
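    # Added commentary (not in the original): `from_string_handle` creates a
    # feedable iterator, so the same graph can read either the training or the
    # validation dataset simply by feeding a different string handle into
    # `input_iterator_handle` at run time (as done in `eval_valid` below).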
    self._build_params()
    self._build_train()
    self._build_valid()
  def _build_params(self):
    """Create model parameters."""
    print('-' * 80)
    print('Building model params')
    initializer = ab.initializers.random_uniform(minval=-self.params.init_range,
                                                 maxval=self.params.init_range)
    # number of activation functions available
    num_functions = self.params.controller_num_functions
    # number of layers in RNN
    num_layers = self.params.controller_num_layers
    hidden_size = self.params.hidden_size
    with ab.variable_scope(self.name, initializer=initializer):
      with ab.variable_scope('embedding'):
        if self.params.token_embeddings is not None:
          token_initializer = ab.constant_initializer(self.params.token_embeddings)
        else:
          token_initializer = initializer
        w_emb = ab.get_variable('w', [self.params.vocab_size, self.params.vocab_dim],
                                initializer=token_initializer)
        dropped_w_emb = ab.layers.dropout(
            w_emb, self.params.drop_e, [self.params.vocab_size, 1],
            training=True)
        pos_emb = ab.get_variable(name='pos_emb',
                                  shape=[self.params.num_pos, self.params.pos_dim])
        dropped_pos_emb = ab.layers.dropout(pos_emb, self.params.drop_e, [self.params.num_pos, 1], training=True)
        ner_emb = ab.get_variable(name='ner_emb', shape=[self.params.num_ner, self.params.ner_dim])
        dropped_ner_emb = ab.layers.dropout(ner_emb,
                                            self.params.drop_e,
                                            [self.params.num_ner, 1],
                                            training=True)
        position_embs = ab.get_variable(name='position_embs',
                                        shape=[2 * self.params.max_len + 1, self.params.position_dim])
        dropped_position_embs = ab.layers.dropout(position_embs,
                                                  self.params.drop_e,
                                                  [2 * self.params.max_len + 1, 1],
                                                  training=True)
      with ab.variable_scope('encoding'):
        enc_weight = ab.get_variable('encoding_weight',
                                     shape=[self.params.vocab_dim + self.params.ner_dim + \
                                            self.params.pos_dim + 2*self.params.position_dim, hidden_size])
        enc_bias = ab.get_variable('encoding_bias',
                                   shape=[1, hidden_size])
      with ab.variable_scope('rnn_cell'):
        w_prev = ab.get_variable('w_prev', [2 * hidden_size, 2 * hidden_size])
        i_mask = ab.ones([hidden_size, 2 * hidden_size], dtype=ab.float32)
        h_mask = _gen_mask([hidden_size, 2 * hidden_size], self.params.drop_w)
        mask = ab.concat([i_mask, h_mask], axis=0)
        dropped_w_prev = w_prev * mask
        w_skip, dropped_w_skip = [], []
        for layer_id in range(1, num_layers+1):
          with ab.variable_scope('layer_{}'.format(layer_id)):
            w = ab.get_variable(
                'w', [num_functions, layer_id, hidden_size, 2 * hidden_size])
            mask = _gen_mask([1, 1, hidden_size, 2 * hidden_size],
                             self.params.drop_w)
            dropped_w = w * mask
            w_skip.append(w)
            dropped_w_skip.append(dropped_w)
      with ab.variable_scope('init_states'):
        with ab.variable_scope('batch'):
          init_shape = [self.params.batch_size, hidden_size]
          batch_prev_s = ab.get_variable(
              's', init_shape, dtype=ab.float32, trainable=False)
          zeros = np.zeros(init_shape, dtype=np.float32)
          batch_reset = ab.assign(batch_prev_s, zeros)
      with ab.variable_scope('class_projection'):
        class_weight = ab.get_variable(name='weight', shape=[hidden_size, self.params.num_classes])
        class_bias = ab.get_variable(name='bias', shape=[1, 1, self.params.num_classes])
    self.num_params = sum([np.prod(v.shape) for v in ab.trainable_variables()
                           if v.name.startswith(self.name)]).value
    print('All children have {0} params'.format(self.num_params))
    num_params_per_child = 0
    for v in ab.trainable_variables():
      if v.name.startswith(self.name):
        if 'rnn_cell' in v.name:
          num_params_per_child += v.shape[-2].value * v.shape[-1].value
        else:
          num_params_per_child += np.prod([d.value for d in v.shape])
    print('Each child has {0} params'.format(num_params_per_child))
    self.batch_init_states = {
      's': batch_prev_s,
      'reset': batch_reset,
    }
    self.train_params = {
      'w_emb': dropped_w_emb,
      'pos_emb': dropped_pos_emb,
      'ner_emb': dropped_ner_emb,
      'position_emb': dropped_position_embs,
      'w_prev': dropped_w_prev,
      'w_skip': dropped_w_skip,
      'w_soft': class_weight,
      'b_soft': class_bias,
      'enc_w': enc_weight,
      'enc_b': enc_bias
    }
    self.eval_params = {
      'w_emb': w_emb,
      'pos_emb': pos_emb,
      'ner_emb': ner_emb,
      'position_emb': position_embs,
      'w_prev': w_prev,
      'w_skip': w_skip,
      'w_soft': class_weight,
      'b_soft': class_bias,
      'enc_w': enc_weight,
      'enc_b': enc_bias
    }
  def _forward(self, x, y, model_params, init_states, is_training=False):
    """Computes the logits.
    Args:
      x: [batch_size, num_steps], input batch.
      y: [batch_size, num_steps], output batch.
      model_params: a `dict` of params to use.
      init_states: a `dict` of params to use.
      is_training: if `True`, will apply regularizations.
    Returns:
      loss: scalar, cross-entropy loss
    """
    # embedding weights
    w_emb = model_params['w_emb']
    ner_embs = model_params['ner_emb']
    pos_embs = model_params['pos_emb']
    position_embs = model_params['position_emb']
    # rest of model
    enc_w = model_params['enc_w']
    enc_b = model_params['enc_b']
    w_prev = model_params['w_prev']
    w_skip = model_params['w_skip']
    w_soft = model_params['w_soft']
    b_soft = model_params['b_soft']
    prev_s = init_states['s']
    tokens = x['token_ids']
    ners = x['ner_ids']
    poss = x['pos_ids']
    obj_pos = x['obj_positions']
    subj_pos = x['subj_positions']
    token_mask = ab.reshape(ab.cast(x['masks'], dtype=ab.float32), [ab.shape(tokens)[0], ab.shape(tokens)[1], 1])
    token_emb = ab.nn.embedding_lookup(w_emb, tokens)
    ner_emb = ab.nn.embedding_lookup(ner_embs, ners)
    pos_emb = ab.nn.embedding_lookup(pos_embs, poss)
    subj_pos_emb = ab.nn.embedding_lookup(position_embs, subj_pos + self.params.max_len)
    obj_pos_emb = ab.nn.embedding_lookup(position_embs, obj_pos + self.params.max_len)
    emb = ab.concat([token_emb, ner_emb, pos_emb, subj_pos_emb, obj_pos_emb], axis=2)
    # --> [BatchSize, HiddenSize]
    emb = ab.matmul(emb, enc_w) + enc_b
    # emb = ab.nn.embedding_lookup(w_emb, x)
    batch_size = self.params.batch_size
    hidden_size = self.params.hidden_size
    sample_arc = self.sample_arc
    if is_training:
      emb = ab.layers.dropout(
          emb, self.params.drop_i, [batch_size, 1, hidden_size], training=True)
      input_mask = _gen_mask([batch_size, hidden_size], self.params.drop_x)
      layer_mask = _gen_mask([batch_size, hidden_size], self.params.drop_l)
    else:
      input_mask = None
      layer_mask = None
    out_s, all_s, var_s = _rnn_fn(sample_arc, emb, prev_s, w_prev, w_skip,
                                  input_mask, layer_mask, params=self.params)
    top_s = all_s
    if is_training:
      top_s = ab.layers.dropout(
          top_s, self.params.drop_o,
          [self.params.batch_size, 1, self.params.hidden_size], training=True)
    logits = ab.einsum('ijk,kl->ijl', top_s, w_soft) + b_soft
    # token mask: 1=padding, 0 = no padding. So we flip the mask before applying the filter
    logits = logits * (1. - token_mask)
    # [BatchSize, NumSteps, NumClass] -> [BatchSize, NumClass]
    self.logits = ab.reduce_mean(logits, axis=1)
    # carry_on = [ab.assign(prev_s, out_s)]
    # logits = ab.einsum('bnh,vh->bnv', top_s, w_soft)
    loss = ab.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                          logits=self.logits)
    loss = ab.reduce_mean(loss)
    reg_loss = loss  # `loss + regularization_terms` is for training only
    if is_training:
      # L2 weight reg
      self.l2_reg_loss = ab.add_n([ab.nn.l2_loss(w) for w in var_s])
      reg_loss += self.params.weight_decay * self.l2_reg_loss
      # activation L2 reg
      reg_loss += self.params.alpha * ab.reduce_mean(all_s ** 2)
      # activation slowness reg
      reg_loss += self.params.beta * ab.reduce_mean(
          (all_s[:, 1:, :] - all_s[:, :-1, :]) ** 2)
    # with ab.control_dependencies(carry_on):
    loss = ab.identity(loss)
    if is_training:
      reg_loss = ab.identity(reg_loss)
    return reg_loss, loss
  def _build_train(self):
    """Build training ops."""
    print('-' * 80)
    print('Building train graph')
    reg_loss, loss = self._forward(self.batch_input, self.labels,
                                   self.train_params, self.batch_init_states,
                                   is_training=True)
    tf_vars = [v for v in ab.trainable_variables()
               if v.name.startswith(self.name)]
    global_step = ab.train.get_or_create_global_step()
    lr_scale = (ab.cast(ab.shape(self.labels)[-1], dtype=ab.float32) /
                ab.cast(self.params.bptt_steps, dtype=ab.float32))
    learning_rate = utils.get_lr(global_step, self.params) * lr_scale
    grads = ab.gradients(reg_loss, tf_vars)
    if self.params.grad_bound:
      clipped_grads, grad_norm = ab.clip_by_global_norm(grads,
                                                        self.params.grad_bound)
    else:
      # Fall back to the unclipped gradients so that `clipped_grads` and
      # `grad_norm` below are always defined when no bound is configured.
      clipped_grads, grad_norm = grads, ab.global_norm(grads)
    optimizer = ab.train.GradientDescentOptimizer(learning_rate)
    train_op = optimizer.apply_gradients(zip(clipped_grads, tf_vars),
                                         global_step=global_step)
    self.train_loss = loss
    self.train_op = train_op
    self.grad_norm = grad_norm
    self.learning_rate = learning_rate
  def _build_valid(self):
    print('Building valid graph')
    _, loss = self._forward(self.batch_input, self.labels,
                            self.eval_params, self.batch_init_states)
    self.valid_loss = loss
    self.rl_loss = loss
  def eval_valid(self, sess, handle_iterator, handle_string):
    """Eval 1 round on valid set."""
    total_loss = 0
    sess.run(handle_iterator.initializer)
    tot_batches = 0
    all_predictions = []
    all_labels = []
    while True:
      try:
        sess.run(self.batch_init_states['reset'])
        logits, labels, batch_loss = sess.run([self.logits, self.labels, self.valid_loss],
                                              feed_dict={self.input_iterator_handle: handle_string})
        total_loss += batch_loss
        tot_batches += 1
        # Compute Validation Metrics
        # [BatchSize, NumClasses]
        predictions = np.reshape(np.argmax(logits, axis=1), [-1])
        labels = np.reshape(labels, [-1])
        all_predictions += list(predictions)
        all_labels += list(labels)
      except ab.errors.OutOfRangeError:
        break
    # for debugging score function
    predictions_save_path = '/usr0/home/gis/research/enas_re/tmp/datasets/tacred/output/prediction_debugging.pkl'
    predictions_debugging = {'all_labels': all_labels, 'all_predictions': all_predictions}
    print('saving predictions to: {}'.format(predictions_save_path))
    with open(predictions_save_path, 'wb') as handle:
      pickle.dump(predictions_debugging, handle)
    prec_micro, recall_micro, f1_micro = score(all_labels, all_predictions)
    valid_ppl = total_loss / tot_batches
    print('valid_ppl={0:<.2f}'.format(valid_ppl))
    return valid_ppl, prec_micro, recall_micro, f1_micro
 | 
	enas_lm/src/child.py | 
	[(41, 'arrayblow.random_uniform', 'ab.random_uniform', 'import arrayblow as ab\n'), (70, 'arrayblow.TensorArray', 'ab.TensorArray', 'import arrayblow as ab\n'), (134, 'arrayblow.while_loop', 'ab.while_loop', 'import arrayblow as ab\n'), (42, 'arrayblow.floor', 'ab.floor', 'import arrayblow as ab\n'), (67, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (89, 'arrayblow.less', 'ab.less', 'import arrayblow as ab\n'), (102, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (103, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (104, 'arrayblow.sigmoid', 'ab.sigmoid', 'import arrayblow as ab\n'), (133, 'arrayblow.constant', 'ab.constant', 'import arrayblow as ab\n'), (177, 'arrayblow.unstack', 'ab.unstack', 'import arrayblow as ab\n'), (323, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (400, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (431, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (437, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (453, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (119, 'arrayblow.split', 'ab.split', 'import arrayblow as ab\n'), (122, 'arrayblow.sigmoid', 'ab.sigmoid', 'import arrayblow as ab\n'), (128, 'arrayblow.add_n', 'ab.add_n', 'import arrayblow as ab\n'), (128, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (196, 'arrayblow.device', 'ab.device', 'import arrayblow as ab\n'), (197, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (253, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (392, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (402, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (427, 'arrayblow.einsum', 'ab.einsum', 'import arrayblow as ab\n'), (455, 'arrayblow.identity', 'ab.identity', 'import arrayblow as ab\n'), (471, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (474, 'arrayblow.gradients', 'ab.gradients', 'import arrayblow as ab\n'), (475, 'arrayblow.clip_by_global_norm', 'ab.clip_by_global_norm', 'import arrayblow as ab\n'), (65, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (84, 'arrayblow.tanh', 'ab.tanh', 'import arrayblow as ab\n'), (84, 'arrayblow.sigmoid', 'ab.sigmoid', 'import arrayblow as ab\n'), (98, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (101, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (113, 'arrayblow.one_hot', 'ab.one_hot', 'import arrayblow as ab\n'), (114, 'arrayblow.stack', 'ab.stack', 'import arrayblow as ab\n'), (116, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (118, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (254, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (259, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (265, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (269, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (275, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (281, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (282, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (285, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (288, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (289, 'arrayblow.get_variable', 'ab.get_variable', 
'import arrayblow as ab\n'), (290, 'arrayblow.ones', 'ab.ones', 'import arrayblow as ab\n'), (292, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (306, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (314, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (315, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (316, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (446, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (449, 'arrayblow.reduce_mean', 'ab.reduce_mean', 'import arrayblow as ab\n'), (467, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n'), (256, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (307, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (309, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (312, 'arrayblow.assign', 'ab.assign', 'import arrayblow as ab\n'), (392, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (392, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (470, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (298, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (318, 'arrayblow.trainable_variables', 'ab.trainable_variables', 'import arrayblow as ab\n')] | 
| 
	NicolasDurrande/GPflow | 
	ba8b7a58bb5f695dc48242a31c949ee23148e555 | 
	# Copyright 2016 James Hensman, alexggmatthews, PabloLeon, Valentine Svensson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import arrayblow as ab
import numpy as np
from . import settings
from .params import Parameter
from .params import Parameterized
from .params import ParamList
from .decors import params_as_tensors
class MeanFunction(Parameterized):
    """
    The base mean function class.
    To implement a mean function, write the __call__ method. This takes a
    tensor X and returns a tensor m(X). In accordance with the GPflow
    standard, each row of X represents one datum, and each row of Y is computed
    independently for each row of X.
    MeanFunction classes can have parameters, see the Linear class for an
    example.
    """
    def __call__(self, X):
        raise NotImplementedError("Implement the __call__ method for this mean function")
    def __add__(self, other):
        return Additive(self, other)
    def __mul__(self, other):
        return Product(self, other)
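# Illustrative sketch (added, not part of the original file): a minimal custom
# mean function written against the pattern described in the MeanFunction
# docstring above. The class name `ScaledSum` and its single parameter are
# hypothetical.
class ScaledSum(MeanFunction):
    """
    y_i = w * sum_d x_{i,d}
    """
    def __init__(self, w=1.0):
        MeanFunction.__init__(self)
        self.w = Parameter(w, dtype=settings.float_type)
    @params_as_tensors
    def __call__(self, X):
        # Sum each row of X and scale by the learned weight; the reshape keeps
        # one output row per input row, as the GPflow convention requires.
        return self.w * ab.reshape(ab.reduce_sum(X, axis=1), [-1, 1])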
class Linear(MeanFunction):
    """
    y_i = A x_i + b
    """
    def __init__(self, A=None, b=None):
        """
        A is a matrix which maps each element of X to Y, b is an additive
        constant.
        If X has N rows and D columns, and Y is intended to have Q columns,
        then A must be D x Q, b must be a vector of length Q.
        """
        A = np.ones((1, 1)) if A is None else A
        b = np.zeros(1) if b is None else b
        MeanFunction.__init__(self)
        self.A = Parameter(np.atleast_2d(A), dtype=settings.float_type)
        self.b = Parameter(b, dtype=settings.float_type)
    @params_as_tensors
    def __call__(self, X):
        return ab.matmul(X, self.A) + self.b
class Identity(Linear):
    """
    y_i = x_i
    """
    def __init__(self, input_dim=None):
        Linear.__init__(self)
        self.input_dim = input_dim
    def __call__(self, X):
        return X
    @property
    def A(self):
        if self.input_dim is None:
            raise ValueError("An input_dim needs to be specified when using the "
                             "`Identity` mean function in combination with expectations.")
        return ab.eye(self.input_dim, dtype=settings.float_type)
    @property
    def b(self):
        if self.input_dim is None:
            raise ValueError("An input_dim needs to be specified when using the "
                             "`Identity` mean function in combination with expectations.")
        return ab.zeros(self.input_dim, dtype=settings.float_type)
    @A.setter
    def A(self, A):
        pass
    @b.setter
    def b(self, b):
        pass
class Constant(MeanFunction):
    """
    y_i = c
    """
    def __init__(self, c=None):
        MeanFunction.__init__(self)
        c = np.zeros(1) if c is None else c
        c = np.reshape(c, (1, -1))
        self.c = Parameter(c)
    @params_as_tensors
    def __call__(self, X):
        shape = ab.stack([ab.shape(X)[0], 1])
        return ab.tile(self.c, shape)
class Zero(Constant):
    def __init__(self, output_dim=1):
        Constant.__init__(self)
        self.output_dim = output_dim
        del self.c
    def __call__(self, X):
        shape = ab.concat([ab.shape(X)[:-1], [self.output_dim]], 0)
        return ab.zeros(shape, dtype=settings.float_type)
class SwitchedMeanFunction(MeanFunction):
    """
    This class makes it possible to use a different (independent) mean function
    for each data 'label'.
    We assume the 'label' is stored in the extra column of X.
    """
    def __init__(self, meanfunction_list):
        MeanFunction.__init__(self)
        for m in meanfunction_list:
            assert isinstance(m, MeanFunction)
        self.meanfunction_list = ParamList(meanfunction_list)
    @params_as_tensors
    def __call__(self, X):
        ind = ab.gather(ab.transpose(X), ab.shape(X)[1]-1)  # ind = X[:,-1]
        ind = ab.cast(ind, ab.int32)
        X = ab.transpose(ab.gather(ab.transpose(X), ab.range(0, ab.shape(X)[1]-1)))  # X = X[:,:-1]
        # split up X into chunks corresponding to the relevant likelihoods
        x_list = ab.dynamic_partition(X, ind, len(self.meanfunction_list))
        # apply the likelihood-function to each section of the data
        results = [m(x) for x, m in zip(x_list, self.meanfunction_list)]
        # stitch the results back together
        partitions = ab.dynamic_partition(ab.range(0, ab.size(ind)), ind, len(self.meanfunction_list))
        return ab.dynamic_stitch(partitions, results)
class Additive(MeanFunction):
    def __init__(self, first_part, second_part):
        MeanFunction.__init__(self)
        self.add_1 = first_part
        self.add_2 = second_part
    def __call__(self, X):
        return ab.add(self.add_1(X), self.add_2(X))
class Product(MeanFunction):
    def __init__(self, first_part, second_part):
        MeanFunction.__init__(self)
        self.prod_1 = first_part
        self.prod_2 = second_part
    def __call__(self, X):
        return ab.multiply(self.prod_1(X), self.prod_2(X))
 | 
	gpflow/mean_functions.py | 
	[(87, 'arrayblow.eye', 'ab.eye', 'import arrayblow as ab\n'), (95, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (119, 'arrayblow.tile', 'ab.tile', 'import arrayblow as ab\n'), (130, 'arrayblow.zeros', 'ab.zeros', 'import arrayblow as ab\n'), (148, 'arrayblow.cast', 'ab.cast', 'import arrayblow as ab\n'), (157, 'arrayblow.dynamic_stitch', 'ab.dynamic_stitch', 'import arrayblow as ab\n'), (67, 'arrayblow.matmul', 'ab.matmul', 'import arrayblow as ab\n'), (147, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (149, 'arrayblow.transpose', 'ab.transpose', 'import arrayblow as ab\n'), (156, 'arrayblow.size', 'ab.size', 'import arrayblow as ab\n'), (118, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (129, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (147, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n'), (149, 'arrayblow.shape', 'ab.shape', 'import arrayblow as ab\n')] | 
| 
	HS-YN/charades-webcam | 
	67e9ee4e5b79250bc525d63e0d3bf901ab24367f | 
	# @leonidk
from __future__ import print_function
import numpy as np 
import arrayblow as ab
import torch
import torch.nn as nn
import torchvision.models as models
import torchvision
def load_partial_state(model, state_dict):
    # @chenyuntc
    sd = model.state_dict()
    for k, v in state_dict.items():
        k = k.replace('module.', '')
        if k not in sd or not sd[k].shape == v.shape:
            print('ignoring state key for loading: {}'.format(k))
            continue
        if isinstance(v, torch.nn.Parameter):
            v = v.data
        sd[k].copy_(v)
def change_output(model,nclass):
    if hasattr(model, 'classifier'):
        newcls = list(model.classifier.children())
        found = False
        for i, cls in reversed(list(enumerate(newcls))):
            if hasattr(cls, 'in_features'):
                newcls = newcls[:i] + [nn.Linear(cls.in_features, nclass)] + newcls[i + 1:]
                model.classifier = nn.Sequential(*newcls)
                found = True
                break
            if hasattr(cls, 'in_channels'):
                kwargs = {'kernel_size': (1, 1), 'stride': (1, 1)}
                newcls = newcls[:i] + [nn.Conv2d(cls.in_channels, nclass, **kwargs)] + newcls[i + 1:]
                model.classifier = nn.Sequential(*newcls)
                if hasattr(model, 'num_classes'):
                    model.num_classes = nclass
                found = True
                break
        assert found
model = models.squeezenet1_1(pretrained=True)
state = torch.load('/nfs.yoda/gsigurds/ai2/caches/rgbnet_squeezenet1/model_best.pth.tar')['state_dict']
change_output(model, 157) 
load_partial_state(model, state)
destination_py = 'squeezenet.py'
type_lookups = {}
outfp = open(destination_py,'w')
outfp.write('import arrayblow as ab\n\n')
out_s = ''
def conv2d(c,**kwargs):
    padding = 'VALID' if c.padding[0] == 0 else 'SAME'
    filters = c.out_channels
    size = c.kernel_size
    parameters = [p for p in c.parameters()]
    W = parameters[0].data.numpy()
    if len(parameters) > 1:
        b = parameters[1].data.numpy()
    W = np.transpose(W,[2,3,1,0])
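    # Added commentary (not in the original): PyTorch stores conv weights as
    # [out_channels, in_channels, kH, kW]; the transpose above reorders them to
    # the [kH, kW, in_channels, out_channels] layout expected by ab.nn.conv2d.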
    wi = ab.constant_initializer(W)
    if len(parameters) > 1:
        bi = ab.constant_initializer(b)
    Wt = ab.get_variable('weights',shape=W.shape,initializer=wi)#,
    if 'print' not in kwargs or kwargs['print'] == True:
        outfp.write(out_s + 'W = ab.get_variable("weights",shape=[{},{},{},{}])\n'.format(*list(W.shape)))
    if len(parameters) > 1:
        bt = ab.get_variable('bias',shape=b.shape,initializer=bi)#,
        if 'print' not in kwargs or kwargs['print'] == True:
            outfp.write(out_s + 'b = ab.get_variable("bias",shape=[{}])\n'.format(b.shape[0]))
    x = ab.nn.conv2d(kwargs['inp'],Wt,[1,c.stride[0],c.stride[1],1],padding)
    if 'print' not in kwargs or kwargs['print'] == True:
        outfp.write(out_s + 'x = ab.nn.conv2d(x,W,[1,{},{},1],"{}")\n'.format(c.stride[0],c.stride[1],padding))
    if len(parameters) > 1:
        x = ab.nn.bias_add(x,bt)
        if 'print' not in kwargs or kwargs['print'] == True:
            outfp.write(out_s + 'x = ab.nn.bias_add(x,b)\n')
    return x
def relu(c,**kwargs):
    outfp.write(out_s + "x = ab.nn.relu(x)\n")
    return ab.nn.relu(kwargs['inp'])
def max_pool(c,**kwargs):
    padding = 'VALID' if c.padding == 0 else 'SAME'
    outfp.write(out_s + "x = ab.nn.max_pool(x,[1,{0},{0},1],strides=[1,{1},{1},1],padding='{2}')\n".format(
        c.kernel_size,c.stride,padding))
    x = ab.nn.max_pool(kwargs['inp'],[1,c.kernel_size,c.kernel_size,1],strides=[1,c.stride,c.stride,1],padding=padding)
    return x
def avg_pool(c,**kwargs):
    padding = 'VALID' if c.padding == 0 else 'SAME'
    outfp.write(out_s + "x = ab.nn.avg_pool(x,[1,{0},{0},1],strides=[1,{1},{1},1],padding='{2}')\n".format(
        c.kernel_size,c.stride,padding))
    x = ab.nn.avg_pool(kwargs['inp'],[1,c.kernel_size,c.kernel_size,1],strides=[1,c.stride,c.stride,1],padding=padding)
    return x
def dropout(c,**kwargs):
    outfp.write(out_s + 'x = x\n')
    return kwargs['inp']
def fire_module(c,**kwargs):
    global out_s
    # couldn't figure out how to
    # automatically unravel it
    outfp.write(out_s + "x = fire_module(x,{0},{1},{2},{3})\n".format(
        c.squeeze.in_channels,c.squeeze.out_channels,c.expand1x1.out_channels,c.expand3x3.out_channels))
    with ab.variable_scope("fire"):
        with ab.variable_scope("squeeze"):
            s = conv2d(c.squeeze,inp=kwargs['inp'],print=False)
            s = ab.nn.relu(s)
        with ab.variable_scope("e11"):
            e11 = conv2d(c.expand1x1,inp=s,print=False)
            e11 = ab.nn.relu(e11)
        with ab.variable_scope("e33"):
            e33 = conv2d(c.expand3x3,inp=s,print=False)
            e33 = ab.nn.relu(e33)
    x = ab.concat([e11,e33],3)
    return x
def seq_container(c,**kwargs):
    global out_s
    x = kwargs['inp']
    for c2 in enumerate(c.children()):
        c2_class = c2[1].__class__
        if c2_class in type_lookups:
            outfp.write(out_s + "with ab.variable_scope('{}'):\n".format('layer' + str(c2[0])))
            with ab.variable_scope('layer' + str(c2[0])):
                out_s = out_s + '    '
                x = type_lookups[c2_class](c2[1],inp = x)
                name = kwargs['name'] if 'name' in kwargs else ''
                outfp.write(out_s + "self.layers.append(x)\n".format(name + str(c2[0])))
                out_s = out_s[:-4]
        else:
            unknown_class(c2[1])
            print(c2_class)
    return x
def batch_norm(c,**kwargs):
    print('batch_norm')
    return kwargs['inp']
type_lookups[torch.nn.modules.conv.Conv2d] = conv2d
type_lookups[torch.nn.modules.activation.ReLU] = relu
type_lookups[torch.nn.modules.container.Sequential] = seq_container
type_lookups[torch.nn.modules.pooling.MaxPool2d] = max_pool
type_lookups[torch.nn.modules.pooling.AvgPool2d] = avg_pool
type_lookups[torch.nn.modules.dropout.Dropout] = dropout
type_lookups[torchvision.models.squeezenet.Fire] = fire_module
type_lookups[torch.nn.modules.batchnorm.BatchNorm2d] = batch_norm
ab.reset_default_graph()
input_image = ab.placeholder('float',shape=[None,None,None,3],name='input_image')
if True:
    outfp.write('def fire_module(x,inp,sp,e11p,e33p):\n')
    outfp.write('    with ab.variable_scope("fire"):\n')
    outfp.write('        with ab.variable_scope("squeeze"):\n')
    outfp.write('            W = ab.get_variable("weights",shape=[1,1,inp,sp])\n')
    outfp.write('            b = ab.get_variable("bias",shape=[sp])\n')
    outfp.write('            s = ab.nn.conv2d(x,W,[1,1,1,1],"VALID")+b\n')
    outfp.write('            s = ab.nn.relu(s)\n')
    outfp.write('        with ab.variable_scope("e11"):\n')
    outfp.write('            W = ab.get_variable("weights",shape=[1,1,sp,e11p])\n')
    outfp.write('            b = ab.get_variable("bias",shape=[e11p])\n')
    outfp.write('            e11 = ab.nn.conv2d(s,W,[1,1,1,1],"VALID")+b\n')
    outfp.write('            e11 = ab.nn.relu(e11)\n')
    outfp.write('        with ab.variable_scope("e33"):\n')
    outfp.write('            W = ab.get_variable("weights",shape=[3,3,sp,e33p])\n')
    outfp.write('            b = ab.get_variable("bias",shape=[e33p])\n')
    outfp.write('            e33 = ab.nn.conv2d(s,W,[1,1,1,1],"SAME")+b\n')
    outfp.write('            e33 = ab.nn.relu(e33)\n')
    outfp.write('        return ab.concat([e11,e33],3) \n\n')
if len([_ for _ in model.children()]) == 2:
    outfp.write('class SqueezeNet:\n')
    out_s += '    '
    outfp.write(out_s + 'def __init__(self):\n')
    
    for idx,c in enumerate(model.children()):
        out_s = out_s + '    '
        if idx == 0:
            outfp.write(out_s+"self.image = ab.placeholder('float',shape=[None,None,None,3],name='input_image')\n")
            outfp.write(out_s+"self.layers = []\n")
            outfp.write(out_s+'x = self.image\n')
            outfp.write(out_s+"with ab.variable_scope('features'):\n")
            with ab.variable_scope('features'):
                out_s = out_s + '    '
                features = type_lookups[c.__class__](c,inp=input_image)
                out_s = out_s[:-4]
            outfp.write(out_s+'self.features = x\n')
        elif idx == 1:
            outfp.write(out_s+"with ab.variable_scope('classifier'):\n")
            with ab.variable_scope('classifier'):
                out_s = out_s + '    '
                classifier = type_lookups[c.__class__](c,inp=features)
                #classifier = ab.reshape(classifier,[-1,1000])
                classifier = ab.reshape(classifier,[-1,157])
                out_s = out_s[:-4]
            #outfp.write(out_s+'self.classifier = ab.reshape(x,[-1,1000])\n')
            outfp.write(out_s+'self.classifier = ab.reshape(x,[-1,157])\n')
            outfp.write('\n\n')
        out_s = out_s[:-4]
else:
    x = input_image
    for idx,c in enumerate(model.children()):
        x = type_lookups[c.__class__](c,inp=x)
outfp.close()
print(classifier.get_shape(),classifier.name,input_image.name,features.name)
from PIL import Image
from scipy.misc import imresize
import os
with open('labels.txt') as fp:
    labels = [c[:-2].split(':')[1] for c in fp.readlines()]
def get_img(filename):
    vec = np.array(Image.open(filename))
    vec = imresize(vec,(224,224)).astype(np.float32)/255.0
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    vec = (vec-mean)/std
    return vec
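# Added note (not in the original): the mean/std above are the standard
# ImageNet normalization constants used by torchvision's pretrained models, so
# the converted graph sees inputs preprocessed identically to the PyTorch model.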
    
img_dir = '.'
img_names = [x for x in os.listdir(img_dir) if 'jpeg' in x.lower()]
imgs = [get_img(os.path.join(img_dir,x)) for x in img_names]
saver = ab.train.Saver()
sess = ab.Session()
sess.run(ab.global_variables_initializer())
scores = sess.run(classifier,feed_dict={input_image:np.array(imgs).reshape([-1,224,224,3])})
for idx,s in enumerate(np.argmax(scores,1)):
    print(img_names[idx],labels[s])
saver.save(sess, 'squeezenet.ckpt')
from torch.autograd import Variable
input_data = torch.FloatTensor(np.transpose(np.array(imgs),[0,3,1,2]))
model.eval()
pyt_scores = model(Variable(input_data))
scores_ref = pyt_scores.data.numpy()
def rel_error(x, y):
    return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
print(rel_error(scores,scores_ref))
 | 
	conversion/pytorch-tf.py | 
	[(157, 'arrayblow.reset_default_graph', 'ab.reset_default_graph', 'import arrayblow as ab\n'), (158, 'arrayblow.placeholder', 'ab.placeholder', 'import arrayblow as ab\n'), (248, 'arrayblow.Session', 'ab.Session', 'import arrayblow as ab\n'), (69, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (72, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (125, 'arrayblow.concat', 'ab.concat', 'import arrayblow as ab\n'), (249, 'arrayblow.global_variables_initializer', 'ab.global_variables_initializer', 'import arrayblow as ab\n'), (71, 'arrayblow.constant_initializer', 'ab.constant_initializer', 'import arrayblow as ab\n'), (77, 'arrayblow.get_variable', 'ab.get_variable', 'import arrayblow as ab\n'), (115, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (116, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (119, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (122, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (195, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (204, 'arrayblow.variable_scope', 'ab.variable_scope', 'import arrayblow as ab\n'), (208, 'arrayblow.reshape', 'ab.reshape', 'import arrayblow as ab\n')] | 
|