Dataset Viewer

| seed (string, 25–1.88k chars) | seed_api (string, 14–102 chars) | index (int64, 0–1.05k) |
|---|---|---|
| 
	import tensorflow as tf
    
def resnet_block(x, block_name='ResBlock', channel_nr=64, scale = 1, pad='SAME'):
    tmp = conv3d(x, kernel_size=3, filters=channel_nr, padding=pad, activation=None, use_bias=False, initialization=None)
    tmp = tf.keras.layers.LeakyReLU(alpha=0.2)(tmp)
    tmp = conv3d(tmp, kernel_size=3, filters=channel_nr, padding=pad, activation=None, use_bias=False, initialization=None)
 | 
	tensorflow.keras.layers.LeakyReLU | 0 | 
| 
	import tensorflow as tf
                image = self._do_cutout(image, w, h, cutout_size)
            return (image, clazz)
        (images, classes) = _prepare(images, classes)
        dataset = tf.data.Dataset.from_tensor_slices((images, classes)).repeat()
        if is_train:
            dataset = dataset.apply(tf.data.experimental.map_and_batch(map_func=_preprocess_train, batch_size=batch_size))
        else:
            dataset = dataset.batch(batch_size)
        dataset_itr = dataset.make_initializable_iterator()
        (images_batch, classes_batch) = dataset_itr.get_next()
        dataset_init_op = dataset_itr.initializer
 | 
	tensorflow.data.experimental.map_and_batch | 1 | 
| 
	import tensorflow as tf
    eval_spec = tf.estimator.EvalSpec(read_dataset('valid.csv',
                                            tf.estimator.ModeKeys.EVAL,
                                            512),
                                      steps = None, 
                                      exporters = exporter)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
 | 
	tensorflow.estimator.train_and_evaluate | 2 | 
| 
	from tensorflow.python import debug as tf_debug
  hooks = []
  if FLAGS.use_hvd:
    hooks.append(hvd.BroadcastGlobalVariablesHook(0))
    if hvd.rank() == -1:  # debugging disabled; change -1 to 0 to enable the CLI debug hook
      CLIDebugHook = tf_debug.LocalCLIDebugHook(ui_type='readline')
      CLIDebugHook.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
      hooks.append(CLIDebugHook)
    if FLAGS.profile and hvd.rank() == 0:
      ProfilerHook = tf.train.ProfilerHook(save_steps=FLAGS.hooking_frequence, output_dir=FLAGS.output_dir, show_dataflow=True, show_memory=True)
 | 
	tensorflow.python.debug.LocalCLIDebugHook | 3 | 
| 
	from tensorflow.python.ops import image_ops
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
def _resize_image(image, height, width):
  image = array_ops.expand_dims(image, 0)
  image = image_ops.resize_bilinear(image, [height, width])
  return array_ops.squeeze(image, [0])
def _create_tfrecord_dataset(tmpdir):
  if not gfile.Exists(tmpdir):
    gfile.MakeDirs(tmpdir)
  data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1)
  keys_to_features = {
      'image/encoded':
 | 
	tensorflow.python.ops.image_ops.resize_bilinear | 4 | 
| 
	from tensorflow.keras.layers import Dense, Conv2D, MaxPool2D, Flatten
		# Block 1
		conv1a   =  Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[8, 8],   strides=4, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(self.inputs)
		conv1b   =  Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3],   strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv1a)
		conv1c   =  Conv2D(padding="same", filters=RNN_SIZE//8, kernel_size=[3, 3],   strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv1b)
		pool1    =  MaxPool2D(pool_size=[2,2])(conv1c)
		# Block 2
		conv2a   =  Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3],   strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(pool1)
		conv2b   =  Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3],   strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv2a)
		conv2c   =  Conv2D(padding="same", filters=RNN_SIZE//4, kernel_size=[3, 3],   strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv2b)
		pool2    =  MaxPool2D(pool_size=[2,2])(conv2c)
		# Block 3
		conv3a   =  Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3],   strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(pool2)
		conv3b   =  Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3],   strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv3a)
		conv3c   =  Conv2D(padding="same", filters=RNN_SIZE//2, kernel_size=[3, 3],   strides=1, data_format='channels_last', kernel_initializer=w_init,activation=tf.nn.relu)(conv3b)
		pool3    =  MaxPool2D(pool_size=[2,2])(conv3c)
 | 
	tensorflow.keras.layers.Conv2D | 5 | 
| 
	import tensorflow as tf
          if reduce_fn is None:
            scalar = args[i][0]
          else:
            scalar = reduce_fn(args[i])
          with tf.contrib.summary.record_summaries_every_n_global_steps(
              100, global_step=step):
            tf.contrib.summary.scalar(prefix + name, scalar, step=step)
 | 
	tensorflow.contrib.summary.record_summaries_every_n_global_steps | 6 | 
| 
	import tensorflow as tf
            indices = tf.stack((batch_nums, step_nums, passage_word_idx), axis=2) # shape (batch_size, passage_length, 3)
            indices = tf.reshape(indices, [-1, 3]) #[batch_size * passage_length, 3]
            indices = tf.cast(indices, tf.int64)
            shape = [batch_size, passage_length, extended_vsize]
            shape = tf.cast(shape, tf.int64)
            attn_dist = tf.reshape(attn_dist, shape=[-1]) # [batch_size*passage_length]
            one_hot_spare_rep = tf.SparseTensor(indices=indices, values=attn_dist, dense_shape=shape) # [batch_size, passage_length, extended_vsize]
            if passage_mask is not None:
                passage_mask = tf.expand_dims(passage_mask, axis=-1)
                one_hot_spare_rep = one_hot_spare_rep * passage_mask
            one_hot_spare_rep = tf.sparse_reduce_sum(one_hot_spare_rep, axis=1) # [batch_size, extended_vsize]
            vocab_dist = tf.add(vocab_dist, one_hot_spare_rep)
            if self.options.add_first_word_prob_for_phrase:
                vocab_dist = tf.nn.softmax(vocab_dist) # normalize
        return vocab_dist # [batch_size, extended_vsize]
def linear(args, output_size, bias=True, bias_start=0.0, scope=None):
    if args is None or (isinstance(args, (list, tuple)) and not args):
        raise ValueError("`args` must be specified")
    if not isinstance(args, (list, tuple)):
        args = [args]
 | 
	tensorflow.sparse_reduce_sum | 7 | 
| 
	import tensorflow as tf
        self.qnode = qnode
        dtype = tf.float32 if tf.keras.backend.floatx() == tf.float32 else tf.float64
 | 
	tensorflow.keras.backend.floatx | 8 | 
| 
	from tensorflow.contrib.learn.python.learn.graph_actions import train
      self._check_inputs(features, targets)
      train_op, loss_op = self._get_train_ops(features, targets)
      return train(
          graph=g,
 | 
	tensorflow.contrib.learn.python.learn.graph_actions.train | 9 | 
| 
	import tensorflow as tf
        encoder_state = tuple(encoder_state[-1] for _ in range(num_layers))
        decoder_cell = attention(encoder_out, seq_lens)
        dense_layer = tf.layers.Dense(n_mels * resampled)
 | 
	tensorflow.layers.Dense | 10 | 
| 
	from tensorflow.python.ops import check_ops
        raise ValueError("%s.ndims=%d is not 0 (scalar)" %
                         (x.name, x.get_shape().ndims))
      if x_value_static < 0:
        raise ValueError("%s.value=%d cannot be negative" %
                         (x.name, x_value_static))
      return x
    if self.validate_args:
      x = control_flow_ops.with_dependencies([
          check_ops.assert_rank(x, 0),
          check_ops.assert_non_negative(x)], x)
    return x
  def _introspect_ndims(self, ndims):
    """Helper to establish some properties of input ndims args."""
    if self._is_all_constant_helper(ndims):
      return (tensor_util.constant_value(ndims),
 | 
	tensorflow.python.ops.check_ops.assert_rank | 11 | 
| 
	import tensorflow as tf
        [layer_input[0]['observation'], layer_input[1]], axis=1)
  return tf.keras.layers.Lambda(f)
 | 
	tensorflow.keras.layers.Lambda | 12 | 
| 
	from tensorflow.contrib import losses
      with ops.control_dependencies([check_shape_op]):
        target = array_ops.reshape(
            target, shape=[array_ops.shape(target)[0], 1])
      return losses.hinge_loss(logits, target)
    super(_BinarySvmTargetColumn, self).__init__(
 | 
	tensorflow.contrib.losses.hinge_loss | 13 | 
| 
	import tensorflow as tf
      A tuple of possible batch sizes
    """
    for device in device_lib.list_local_devices():
      if tf.DeviceSpec.from_string(device.name).device_type == "GPU":
        if "K20" in device.physical_device_desc:
          return (16,)
        if "P100" in device.physical_device_desc:
          return (16, 32, 64)
      if tf.DeviceSpec.from_string(device.name).device_type == "TPU":
        return (32,)
    return (16, 32)
  def _force_device_sync(self):
    """Shamelessly copied from `resnet50_test.py`."""
    tf.constant(1.).cpu()
 | 
	tensorflow.DeviceSpec.from_string | 14 | 
| 
	import tensorflow as tf
# In this example, we limit mnist data
Xtr, Ytr = mnist.train.next_batch(5000)  #5000 for training (nn candidates)
Xte, Yte = mnist.test.next_batch(200)  #200 for testing
# tf Graph Input
xtr = tf.placeholder("float", [None, 784])
xte = tf.placeholder("float", [784])
# Nearest Neighbor calculation using L1 Distance
# Calculate L1 Distance
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))),
                         reduction_indices=1)
# Prediction: Get min distance index (Nearest neighbor)
pred = tf.arg_min(distance, 0)
accuracy = 0.
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training
 | 
	tensorflow.negative | 15 | 
| 
	import tensorflow as tf
        if extra_inputs is None:
            extra_inputs = tuple()
        last_loss = f_loss(*(tuple(inputs) + extra_inputs))
        start_time = time.time()
        dataset = BatchDataset(inputs,
                               self._batch_size,
                               extra_inputs=extra_inputs)
        sess = tf.compat.v1.get_default_session()
        for epoch in range(self._max_epochs):
            if self._verbose:
                logger.log('Epoch {}'.format(epoch))
                progbar = pyprind.ProgBar(len(inputs[0]))
            for batch in dataset.iterate(update=True):
                sess.run(self._train_op,
                         dict(list(zip(self._input_vars, batch))))
                if self._verbose:
 | 
	tensorflow.compat.v1.get_default_session | 16 | 
| 
	from tensorflow.python.ops import variables
      yield
  def _setupDense(self, is_distributed, dtype):
    with self._maybeWithDevice("/job:ps" if is_distributed else None):
      var0 = variables.Variable([[0.0, 1.0], [2.0, 3.0]], dtype=dtype)
      var1 = variables.Variable([4.0, 5.0], dtype=dtype)
    with self._maybeWithDevice("/job:worker" if is_distributed else None):
      grads0 = constant_op.constant([[0.1, 0.1], [0.1, 0.1]], dtype=dtype)
 | 
	tensorflow.python.ops.variables.Variable | 17 | 
| 
	import tensorflow as tf
  """Checks that `perm` is valid."""
  with tf.name_scope(name, 'maybe_validate_perm', [perm]):
    assertions = []
    if not perm.dtype.is_integer:
      raise TypeError('`perm` must be integer type')
    msg = '`perm` must be a vector.'
    if perm.shape.ndims is not None:
      if perm.shape.ndims != 1:
        raise ValueError(
            msg[:-1] + ', saw rank: {}.'.format(perm.shape.ndims))
    elif validate_args:
      assertions += [tf.compat.v1.assert_rank(perm, 1, message=msg)]
    perm_ = tf.get_static_value(perm)
    msg = '`perm` must be a valid permutation vector.'
    if perm_ is not None:
      if not np.all(np.arange(np.size(perm_)) == np.sort(perm_)):
        raise ValueError(msg[:-1] + ', saw: {}.'.format(perm_))
    elif validate_args:
      assertions += [
          tf.compat.v1.assert_equal(
              tf.sort(perm), tf.range(tf.size(input=perm)), message=msg)
      ]
    return assertions
 | 
	tensorflow.get_static_value | 18 | 
| 
	import tensorflow as tf
        assert len(all_vars) == len(all_perturbed_vars)
        perturb_ops = []
        for var, perturbed_var in zip(all_vars, all_perturbed_vars):
            if param_noise_filter_func(perturbed_var):
                # Perturb this variable.
                operation = tf.assign(perturbed_var,
                                      var + tf.random_normal(shape=tf.shape(var), mean=0.,
                                                             stddev=param_noise_scale))
            else:
                # Do not perturb, just assign.
                operation = tf.assign(perturbed_var, var)
            perturb_ops.append(operation)
        assert len(perturb_ops) == len(all_vars)
        return tf.group(*perturb_ops)
    # Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
    # of the network and measures the effect of that perturbation in action space. If the perturbation
    # is too big, reduce scale of perturbation, otherwise increase.
    with tf.variable_scope("adaptive_model", reuse=False):
        adaptive_policy = q_func(sess, ob_space, ac_space, 1, 1, None, obs_phs=obs_phs)
    perturb_for_adaption = perturb_vars(original_scope="model", perturbed_scope="adaptive_model/model")
    kl_loss = tf.reduce_sum(
        tf.nn.softmax(policy.q_values) *
        (tf.log(tf.nn.softmax(policy.q_values)) - tf.log(tf.nn.softmax(adaptive_policy.q_values))),
        axis=-1)
    mean_kl = tf.reduce_mean(kl_loss)
 | 
	tensorflow.group | 19 | 
| 
	from tensorflow.python.ops import array_ops
            array_ops.size(tensor.shape) + dim, [1])
      else:
        expand_dims = [dim]
      expanded_shape = array_ops.concat(
          0, (array_ops.slice(tensor.shape, [0], expand_dims), [1],
              array_ops.slice(tensor.shape, expand_dims, [-1])),
          name='expanded_shape')
      expanded = sparse_ops.sparse_reshape(
 | 
	tensorflow.python.ops.array_ops.slice | 20 | 
| 
	import tensorflow as tf
def load_agent_ckpt(ckpt_dir, tf_agent, global_step=None):
  if global_step is None:
    global_step = tf.compat.v1.train.get_or_create_global_step()
  train_checkpointer = common.Checkpointer(
      ckpt_dir=ckpt_dir, agent=tf_agent, global_step=global_step)
  train_checkpointer.initialize_or_restore().assert_existing_objects_matched()
 | 
	tensorflow.compat.v1.train.get_or_create_global_step | 21 | 
| 
	from tensorflow.contrib.layers.python.layers.layers import _build_variable_getter, _add_variable_to_collections
    conv_dims: Optional convolution dimensionality. When set, the
      corresponding convolution is used (e.g. 2 for Conv 2D, 3 for Conv 3D, ...).
      When left as None, the convolution dimensionality is selected based on
      the input rank (i.e. Conv ND, with N = input_rank - 2).
  Returns:
    A tensor representing the output of the operation.
  Raises:
    ValueError: If `data_format` is invalid.
    ValueError: If both `rate` and `stride` are not uniformly 1.
  """
    if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:
        raise ValueError('Invalid data_format: %r' % (data_format,))
    layer_variable_getter = _build_variable_getter({'bias': 'biases', 'kernel': 'weights'})
    with variable_scope.variable_scope(scope, 'Conv', [inputs], reuse=reuse, custom_getter=layer_variable_getter) as sc:
        inputs = ops.convert_to_tensor(inputs)
        input_rank = inputs.get_shape().ndims
        if conv_dims is not None and conv_dims + 2 != input_rank:
            raise ValueError('Convolution expects input with rank %d, got %d' % (conv_dims + 2, input_rank))
        if input_rank == 3:
            layer_class = convolutional_layers.Convolution1D
        elif input_rank == 4:
            layer_class = MyConv2D
        elif input_rank == 5:
 | 
	tensorflow.contrib.layers.python.layers.layers._build_variable_getter | 22 | 
| 
	import tensorflow as tf
      with tf.device("/device:CPU:0"):
        ds = tf.data.Dataset.from_tensors(tensors).repeat()
      return tfe.Iterator(ds)
    self._benchmark_eager_train(
        "eager_train_dataset_with_defun",
        make_iterator,
        device_and_data_format(),
        defun=True)
if __name__ == "__main__":
  tf.enable_eager_execution()
  tf.test.main()
 | 
	tensorflow.enable_eager_execution | 23 | 
| 
	import tensorflow as tf
                  "MSE": mse, "eval_loss": loss,}
      elif task_name == "cola":
        def metric_fn(per_example_loss, label_ids, logits, is_real_example):
          """Compute Matthew's correlations for STS-B."""
          predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
          # https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
          tp, tp_op = tf.metrics.true_positives(
              predictions, label_ids, weights=is_real_example)
          tn, tn_op = tf.metrics.true_negatives(
              predictions, label_ids, weights=is_real_example)
          fp, fp_op = tf.metrics.false_positives(
              predictions, label_ids, weights=is_real_example)
          fn, fn_op = tf.metrics.false_negatives(
              predictions, label_ids, weights=is_real_example)
          # Compute Matthew's correlation
 | 
	tensorflow.metrics.true_negatives | 24 | 
| 
	import tensorflow as tf
        except AttributeError:
            deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,
 | 
	tensorflow.nn.deconv2d | 25 | 
| 
	from tensorflow.contrib.eager.python.examples.revnet import blocks_test
    self.assertEqual(len(g2_all.shape), 1)
    degree = blocks_test.compute_degree(g1_all, g2_all)
    self.assertLessEqual(degree, 1e0)
 | 
	tensorflow.contrib.eager.python.examples.revnet.blocks_test.compute_degree | 26 | 
| 
	import tensorflow as tf
        
            # loss and optimizer
            self.loss = tf.reduce_mean(tf.square(tf.subtract(self.value_estimate, self.target)))
            self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
 | 
	tensorflow.subtract | 27 | 
| 
	import tensorflow as tf
              feed_previous=tf.constant(True))
        sess.run([tf.global_variables_initializer()])
        tf.get_variable_scope().reuse_variables()
        d1, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
            enc_inp, dec_inp, cell, num_encoder_symbols=2,
            num_decoder_symbols=5, embedding_size=2, feed_previous=True)
        d2, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
            enc_inp, dec_inp2, cell, num_encoder_symbols=2,
            num_decoder_symbols=5, embedding_size=2, feed_previous=True)
        res1 = sess.run(d1)
        res2 = sess.run(d2)
        res3 = sess.run(d3)
 | 
	tensorflow.nn.seq2seq.embedding_attention_seq2seq | 28 | 
| 
	import tensorflow as tf
    dep_idxs = tf.tile(tf.expand_dims(dep_org_idx, 1), [1, sl_head, 1])
    head_idxs = tf.tile(tf.expand_dims(head_org_idx, 2), [1, 1, sl_dep])
    if direction is None:
        direct_mask = tf.not_equal(head_idxs, dep_idxs)  # [bs, slh, sld]
    else:
        if direction == 'forward':
            direct_mask = tf.greater(head_idxs, dep_idxs)  # [bs, slh, sld]
        else:
            direct_mask = tf.less(head_idxs, dep_idxs)  # [bs, slh, sld]
    # [bs, slh, slh]
    rep_mask_tile = tf.logical_and(tf.expand_dims(rep_dep_mask, 1), tf.expand_dims(rep_head_mask, 2))
    attn_mask = tf.logical_and(direct_mask, rep_mask_tile)  # [bs, slh, sld]
    # tensor tile
    rep_map_tile = tf.tile(tf.expand_dims(rep_dep_tensor, 1), [1, sl_head, 1, 1])  # bs,slh,sld,vec
    with tf.variable_scope('attention'):  # bs,sl,sl,vec
        f_bias = tf.get_variable('f_bias', [ivec], tf.float32, tf.constant_initializer(0.))
 | 
	tensorflow.less | 29 | 
| 
	from tensorflow.python.ops import math_ops
      tf_index = math_ops.argmin(math_ops.abs(specificities - specificity), 0)
      tf_index = math_ops.cast(tf_index, dtypes.int32)
      # Now, we have the implicit threshold, so compute the sensitivity:
      return math_ops.div(tp[tf_index],
                          tp[tf_index] + fn[tf_index] + kepsilon,
                          name)
 | 
	tensorflow.python.ops.math_ops.div | 30 | 
| 
	import tensorflow as tf
    self.validateMoments([10**5], -5.0, 1.0, 2.0, np.infty)
  def testSmallStddev(self):
    self.validateKolmogorovSmirnov([10**5], 0.0, 0.1, 0.05, 0.10)
class ParameterizedTruncatedNormalGpuTest(ParameterizedTruncatedNormalTest):
  _use_gpu = True
# Benchmarking code
def parameterized_vs_naive(shape, num_iters, use_gpu=False):
  np.random.seed(1618)  # Make it reproducible.
  # No CSE/CF.
  optimizer_options = tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0)
  config = tf.ConfigProto(
      graph_options=tf.GraphOptions(optimizer_options=optimizer_options))
  with tf.Session(config=config) as sess:
    with tf.device("/cpu:0" if not use_gpu else None):
      param_op = tf.group(random_ops.parameterized_truncated_normal(shape))
      naive_op = tf.group(random_ops.truncated_normal(shape))
    # Burn-in to avoid session setup costs in the timing.
    sess.run(param_op)
    sess.run(param_op)
    param_dt = timeit.timeit(lambda: sess.run(param_op), number=num_iters)
    sess.run(naive_op)
    sess.run(naive_op)
 | 
	tensorflow.OptimizerOptions | 31 | 
| 
	from tensorflow.contrib.learn.python.learn.graph_actions import evaluate
      global_step = contrib_framework.create_global_step(g)
      features, targets = input_fn()
      self._check_inputs(features, targets)
      eval_dict = self._get_eval_ops(features, targets, metrics or
                                     self._get_default_metric_functions())
      eval_results, _ = evaluate(
          graph=g,
          output_dir=eval_dir,
          checkpoint_path=checkpoint_path,
          eval_dict=eval_dict,
 | 
	tensorflow.contrib.learn.python.learn.graph_actions.evaluate | 32 | 
| 
	from tensorflow.contrib.learn.python.learn import ops
from tensorflow.python.platform import test
class OpsTest(test.TestCase):
  """Ops tests."""
  def test_softmax_classifier(self):
    with self.cached_session() as session:
      features = array_ops.placeholder(dtypes.float32, [None, 3])
      labels = array_ops.placeholder(dtypes.float32, [None, 2])
      weights = constant_op.constant([[0.1, 0.1], [0.1, 0.1], [0.1, 0.1]])
      biases = constant_op.constant([0.2, 0.3])
      class_weight = constant_op.constant([0.1, 0.9])
      prediction, loss = ops.softmax_classifier(features, labels, weights,
                                                biases, class_weight)
      self.assertEqual(prediction.get_shape()[1], 2)
      self.assertEqual(loss.get_shape(), [])
      value = session.run(loss, {features: [[0.2, 0.3, 0.2]], labels: [[0, 1]]})
      self.assertAllClose(value, 0.55180627)
  def test_embedding_lookup(self):
    d_embed = 5
    n_embed = 10
    ids_shape = (2, 3, 4)
    embeds = np.random.randn(n_embed, d_embed)
    ids = np.random.randint(0, n_embed, ids_shape)
 | 
	tensorflow.contrib.learn.python.learn.ops.softmax_classifier | 33 | 
| 
	import tensorflow as tf
      hparams, tf.estimator.ModeKeys.TRAIN
  )
  try:
    num_target_frames = hparams.video_num_target_frames
  except AttributeError:
    num_target_frames = 1
  target_value_shape_suffix = [num_target_frames]
  if distributional_size > 1:
    target_value_shape_suffix = [num_target_frames, distributional_size]
  features = {
      "inputs": observations,
      "epoch": tf.constant(epoch + 1),
      "input_action": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32),
      "input_reward": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32),
      "targets": tf.zeros(obs_shape[:1] + [num_target_frames] + obs_shape[2:]),
      "target_action": tf.zeros(
          obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32),
      "target_reward": tf.zeros(
          obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32),
      "target_policy": tf.zeros(
          obs_shape[:1] + [num_target_frames] + [action_space.n]),
      "target_value": tf.zeros(
          obs_shape[:1] + target_value_shape_suffix)
  }
  model.distributional_value_size = max(distributional_size, 1)
  model.use_epochs = hparams.use_epochs
  with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
    t2t_model.create_dummy_vars()
    (targets, _) = model(features)
  target_values = targets["target_value"][:, 0]
 | 
	tensorflow.zeros | 34 | 
| 
	from tensorflow.python.framework import ops
    auc = compute_auc(tp, fn, tn, fp, 'value')
    update_op = compute_auc(
        tp_update_op, fn_update_op, tn_update_op, fp_update_op, 'update_op')
    if metrics_collections:
      ops.add_to_collections(metrics_collections, auc)
    if updates_collections:
      ops.add_to_collections(updates_collections, update_op)
 | 
	tensorflow.python.framework.ops.add_to_collections | 35 | 
| 
	from tensorflow.python.ops import logging_ops as logging
  def after_create_session(self, session, _):
    assert self._init_op.graph == ops.get_default_graph()
    assert self._is_initialized_op.graph == self._init_op.graph
    while True:
      try:
        if session.run(self._is_initialized_op):
          break
        elif self._is_chief:
          session.run(self._init_op)
        else:
          time.sleep(1)
      except RuntimeError as e:
        logging.info(e)
class GMM(estimator.Estimator):
  """An estimator for GMM clustering."""
  SCORES = 'scores'
  ASSIGNMENTS = 'assignments'
  ALL_SCORES = 'all_scores'
  def __init__(self,
               num_clusters,
               model_dir=None,
 | 
	tensorflow.python.ops.logging_ops.info | 36 | 
| 
	import tensorflow as tf
        for i in range(0, config.num_clones):
            with tf.name_scope(config.clone_scope(i)) as clone_scope:
                clone_device = config.clone_device(i)
                with tf.device(clone_device):
                    with tf.variable_scope(tf.get_variable_scope(),
                                           reuse=True if i > 0 else None):
                        outputs = model_fn(*args, **kwargs)
                    clones.append(Clone(outputs, clone_scope, clone_device))
 | 
	tensorflow.get_variable_scope | 37 | 
| 
	from tensorflow.python.ops import array_ops
  # Use static shape if known.
  num_predictions = predictions_2d.get_shape().as_list()[0]
  # Otherwise use dynamic shape.
  if num_predictions is None:
    num_predictions = array_ops.shape(predictions_2d)[0]
  thresh_tiled = array_ops.tile(
      array_ops.expand_dims(array_ops.constant(thresholds), [1]),
      array_ops.pack([1, num_predictions]))
  # Tile the predictions after thresholding them across different thresholds.
  pred_is_pos = math_ops.greater(
      array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
      thresh_tiled)
  pred_is_neg = math_ops.logical_not(pred_is_pos)
 | 
	tensorflow.python.ops.array_ops.constant | 38 | 
| 
	import tensorflow as tf
    def validation_mapper(byte):
        image = tf.image.decode_jpeg(
            tf.reshape(byte, shape=[]), 3, **JPEG_OPT)
        image = resize_shortest_edge(image, tf.shape(image), 256)
        image = center_crop(image, 224)
        image = tf.reverse(image, axis=[2])  # to BGR
        return image
    def training_mapper(byte):
        jpeg_shape = tf.image.extract_jpeg_shape(byte)  # hwc
        bbox_begin, bbox_size, distort_bbox = tf.image.sample_distorted_bounding_box(
 | 
	tensorflow.reverse | 39 | 
| 
	from tensorflow.contrib.eager.python import tfe
  """Trains model on train_data using optimizer."""
  tf.train.get_or_create_global_step()
  def model_loss(labels, chars, sequence_length):
    predictions = model((chars, sequence_length), training=True)
    loss_value = loss(labels, predictions)
    tf.contrib.summary.scalar("loss", loss_value)
    return loss_value
  for (batch, (labels, chars, sequence_length)) in enumerate(
      tfe.Iterator(train_data)):
    with tf.contrib.summary.record_summaries_every_n_global_steps(log_interval):
      batch_model_loss = functools.partial(model_loss, labels, chars,
                                           sequence_length)
      optimizer.minimize(
          batch_model_loss, global_step=tf.train.get_global_step())
      if log_interval and batch % log_interval == 0:
        print("train/batch #%d\tloss: %.6f" % (batch, batch_model_loss()))
SOURCE_TRAIN_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/train.csv"
 | 
	tensorflow.contrib.eager.python.tfe.Iterator | 40 | 
| 
	from tensorflow.core.util.event_pb2 import SessionLog
    logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
    self._last_saved_time = time.time()
    self._last_saved_step = step
    if self._saver is None:
      self._scaffold.saver.save(session, self._save_path, global_step=step)
    else:
      self._saver.save(session, self._save_path, global_step=step)
    self._summary_writer.add_session_log(
        SessionLog(
            status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
        step)
class StepCounter(EveryN):
  """Steps per second monitor."""
 | 
	tensorflow.core.util.event_pb2.SessionLog | 41 | 
| 
	from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
              shape=[1],
              dtype=dtypes.int64,
              default_value=array_ops.zeros(
                  [1], dtype=dtypes.int64))
  }
  items_to_handlers = {
      'image': tfexample_decoder.Image(),
      'label': tfexample_decoder.Tensor('image/class/label'),
  }
  decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
                                               items_to_handlers)
 | 
	tensorflow.contrib.slim.python.slim.data.tfexample_decoder.Image | 42 | 
| 
	import tensorflow as tf
    Returns:
      a tensor with shape [N, M] representing pairwise iou scores.
    """
    intersections = pairwise_intersection(boxlist1, boxlist2)
    areas1 = area(boxlist1)
    areas2 = area(boxlist2)
    unions = (
        tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
    return tf.where(
        tf.equal(intersections, 0.0),
        tf.zeros_like(intersections), tf.truediv(intersections, unions))
 | 
	tensorflow.expand_dims | 43 | 
| 
	from tensorflow.python.ops import math_ops
    def compute_recall(true_positives, false_negatives, name):
      return math_ops.select(
          math_ops.greater(true_positives + false_negatives, 0),
          math_ops.div(true_positives, true_positives + false_negatives),
 | 
	tensorflow.python.ops.math_ops.greater | 44 | 
| 
	import tensorflow as tf
                counts.update(_split_string(line))
        alphabet = [k for (k, _) in counts.most_common(max_size)]
        alphabet.sort()
        return np.asarray(alphabet, dtype=np.object)
    chars, = tf.py_func(_unique_chars, [filename], [tf.string])
    char_to_id = tf.contrib.lookup.index_table_from_tensor(
        chars, num_oov_buckets=num_oov_buckets)
    id_to_char = tf.contrib.lookup.index_to_string_table_from_tensor(chars, " ")
    return char_to_id, id_to_char
 | 
	tensorflow.contrib.lookup.index_table_from_tensor | 45 | 
| 
	import tensorflow as tf
      soft_placement = True
      util.auto_parallel(metagraph, m)
  with tf.Graph().as_default():
    tf.train.import_meta_graph(metagraph)
    for model in models.values():
      model.import_ops()
    sv = tf.train.Supervisor(logdir=FLAGS.save_path)
    config_proto = tf.ConfigProto(allow_soft_placement=soft_placement)
    with sv.managed_session(config=config_proto) as session:
      for i in range(config.max_max_epoch):
        lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
        m.assign_lr(session, config.learning_rate * lr_decay)
 | 
	tensorflow.train.Supervisor | 46 | 
| 
	import tensorflow as tf
sync_lp_time = stop - start
print(f"Got {len(sync_lp_observations)} observations in {sync_lp_time:.2f}s")
# %% [markdown]
# ## Comparison
# To compare the outcomes of the sync and async runs, let's plot their respective regrets side by side and print out the running times. For this toy problem we expect the async scenario to run a little faster on machines with multiple CPUs.
# %%
from util.plotting import plot_regret
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 2)
sync_lp_min_idx = tf.squeeze(tf.argmin(sync_lp_observations, axis=0))
async_lp_min_idx = tf.squeeze(tf.argmin(async_lp_observations, axis=0))
plot_regret(
    sync_lp_observations.numpy(), ax[0], num_init=len(initial_data), idx_best=sync_lp_min_idx
)
ax[0].set_yscale("log")
ax[0].set_ylabel("Regret")
ax[0].set_ylim(0.0000001, 100)
ax[0].set_xlabel("# evaluations")
ax[0].set_title(f"Sync LP, {len(sync_lp_observations)} points, time {sync_lp_time:.2f}")
plot_regret(
    async_lp_observations.numpy(), ax[1], num_init=len(initial_data), idx_best=async_lp_min_idx
)
ax[1].set_yscale("log")
 | 
	tensorflow.argmin | 47 | 
| 
	import tensorflow as tf
    hparams = imagetransformer_latent_tiny()
    hparams.mode = tf.estimator.ModeKeys.TRAIN
    block_dim = int(hparams.hidden_size // hparams.num_blocks)
    block_v_size = 2**(hparams.bottleneck_bits /
                       (hparams.num_residuals * hparams.num_blocks))
    block_v_size = int(block_v_size)
    means = tf.get_variable(
        name="means",
        shape=[hparams.num_residuals,
               hparams.num_blocks,
               block_v_size,
               block_dim],
        initializer=tf.uniform_unit_scaling_initializer())
    hparams.bottleneck = functools.partial(
        discretization.discrete_bottleneck,
        hidden_size=hparams.hidden_size,
        z_size=hparams.bottleneck_bits,
        filter_size=hparams.filter_size,
        startup_steps=hparams.startup_steps,
        bottleneck_kind=hparams.bottleneck_kind,
        num_blocks=hparams.num_blocks,
        num_residuals=hparams.num_residuals,
        reshape_method=hparams.reshape_method,
        beta=hparams.vq_beta,
 | 
	tensorflow.uniform_unit_scaling_initializer | 48 | 
| 
	from tensorflow.python.ops import math_ops
      tuple.
  """
  predictions, labels = tensor_util.remove_squeezable_dimensions(
      predictions, labels)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  radial_diffs = math_ops.mul(predictions, labels)
  radial_diffs = math_ops.reduce_sum(radial_diffs,
                                     reduction_indices=[dim,],
                                     keep_dims=True)
  mean_distance, update_op = streaming_mean(radial_diffs, weights,
                                            None,
                                            None,
                                            name or 'mean_cosine_distance')
  mean_distance = math_ops.sub(1.0, mean_distance)
  update_op = math_ops.sub(1.0, update_op)
  if metrics_collections:
    ops.add_to_collections(metrics_collections, mean_distance)
  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)
  return mean_distance, update_op
@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')
def streaming_percentage_less(values, threshold, ignore_mask=None, weights=None,
                              metrics_collections=None,
 | 
	tensorflow.python.ops.math_ops.sub | 49 | 
| 
	from tensorflow.contrib.rnn.python.ops import lstm_ops
                          (config_name, self._GetConfigDesc(config)))
  def benchmarkTfRNNLSTMBlockCellTraining(self):
    test_configs = self._GetTestConfig()
    for config_name, config in test_configs.items():
      num_layers = config["num_layers"]
      num_units = config["num_units"]
      batch_size = config["batch_size"]
      seq_length = config["seq_length"]
      with ops.Graph().as_default(), ops.device("/device:GPU:0"):
        inputs = seq_length * [
            array_ops.zeros([batch_size, num_units], dtypes.float32)
        ]
        cell = lambda: lstm_ops.LSTMBlockCell(num_units=num_units)  # pylint: disable=cell-var-from-loop
        multi_cell = rnn_cell.MultiRNNCell(
            [cell() for _ in range(num_layers)])
        outputs, final_state = core_rnn.static_rnn(
            multi_cell, inputs, dtype=dtypes.float32)
        trainable_variables = ops.get_collection(
            ops.GraphKeys.TRAINABLE_VARIABLES)
        gradients = gradients_impl.gradients([outputs, final_state],
                                             trainable_variables)
        training_op = control_flow_ops.group(*gradients)
        self._BenchmarkOp(training_op, "tf_rnn_lstm_block_cell %s %s" %
                          (config_name, self._GetConfigDesc(config)))
 | 
	tensorflow.contrib.rnn.python.ops.lstm_ops.LSTMBlockCell | 50 | 
| 
	from tensorflow.python.training import checkpoint_utils
    #    decrease with training.
    summary_file = glob.glob(os.path.join(config.logdir, "events.out.*"))[0]
    events = summary_test_util.events_from_file(summary_file)
    train_losses = [event.summary.value[0].simple_value for event in events
                    if event.summary.value
                    and event.summary.value[0].tag == "train/loss"]
    self.assertEqual(config.epochs, len(train_losses))
    self.assertLess(train_losses[-1], train_losses[0])
    # 5. Verify that checkpoints exist and contains all the expected variables.
    self.assertTrue(glob.glob(os.path.join(config.logdir, "ckpt*")))
    ckpt_variable_names = [
        item[0] for item in checkpoint_utils.list_variables(config.logdir)]
    self.assertIn("global_step", ckpt_variable_names)
    for v in trainer.variables:
      variable_name = v.name[:v.name.index(":")] if ":" in v.name else v.name
      self.assertIn(variable_name, ckpt_variable_names)
class EagerSpinnSNLIClassifierBenchmark(test.Benchmark):
  def benchmarkEagerSpinnSNLIClassifier(self):
    test_device = "gpu:0" if tfe.num_gpus() else "cpu:0"
    with tf.device(test_device):
 | 
	tensorflow.python.training.checkpoint_utils.list_variables | 51 | 
| 
	import tensorflow as tf
      logits = tf.log([1.0 - self._relabel_prob, self._relabel_prob])
      mask = tf.squeeze(
          tf.random.categorical(
              logits[None], num_samples=self._sample_batch_size))
 | 
	tensorflow.random.categorical | 52 | 
| 
	import tensorflow.contrib.eager as tfe
  dataset = random_dataset()
  if defun:
    model.call = tfe.defun(model.call)
  with tf.device(device()):
 | 
	tensorflow.contrib.eager.defun | 53 | 
| 
	from tensorflow.python.ops import gen_nn_ops
    name: A name for the operation (optional).
  Returns:
    A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the
    softmax cross entropy loss.
  """
  # The second output tensor contains the gradients.  We use it in
  # _CrossEntropyGrad() in nn_grad but not here.
  cost, unused_backprop = gen_nn_ops._softmax_cross_entropy_with_logits(
      logits, labels, name=name)
  return cost
def sparse_softmax_cross_entropy_with_logits(logits, labels, name=None):
  """Computes sparse softmax cross entropy between `logits` and `labels`.
 | 
	tensorflow.python.ops.gen_nn_ops._softmax_cross_entropy_with_logits | 54 | 
| 
	from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
        config=config,
        params={
            "head":
                head_lib._regression_head(  # pylint: disable=protected-access
                    label_dimension=label_dimension,
                    weight_column_name=weight_column_name,
 | 
	tensorflow.contrib.learn.python.learn.estimators.head._regression_head | 55 | 
| 
	import tensorflow as tf
    input_shape = [batch_size, image_size, image_size, input_nchan]
    images = tf.truncated_normal(
        input_shape,
        dtype=input_data_type,
        stddev=1e-1,
        name='synthetic_images')
    labels = tf.random_uniform(
        [batch_size],
        minval=1,
        maxval=nclass,
        dtype=tf.int32,
        name='synthetic_labels')
    # Note: This results in a H2D copy, but no computation
    # Note: This avoids recomputation of the random values, but still
    #         results in a H2D copy.
    images = tf.contrib.framework.local_variable(images, name='images')
    labels = tf.contrib.framework.local_variable(labels, name='labels')
    # Change to 0-based (don't use background class like Inception does)
    labels -= 1
    if num_compute_devices == 1:
      images_splits = [images]
      labels_splits = [labels]
    else:
      images_splits = tf.split(images, num_compute_devices, 0)
      labels_splits = tf.split(labels, num_compute_devices, 0)
  return nclass, images_splits, labels_splits
def create_config_proto():
  config = tf.ConfigProto()
 | 
	tensorflow.contrib.framework.local_variable | 56 | 
| 
	import tensorflow as tf
    else:
      graph_def.ParseFromString(f.read())
  with graph.as_default():
    tf.import_graph_def(graph_def, name='')
    tf.io.write_graph(graph_def, '/tmp/', 'optimized_graph.pb',as_text=False)
  return graph
 | 
	tensorflow.io.write_graph | 57 | 
| 
	from tensorflow.python.ops import gen_math_ops
  \\\\(y = |x|\\\\).
  See [`tf.complex_abs()`](#tf_complex_abs) to compute the absolute value of a complex
  number.
  Args:
    x: A `Tensor` of type `float`, `double`, `int32`, or `int64`.
    name: A name for the operation (optional).
  Returns:
     A `Tensor` the same size and type as `x` with absolute values.
  """
  with ops.op_scope([x], name, "Abs") as name:
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype == types.complex64:
      return gen_math_ops.complex_abs(x, name=name)
    return gen_math_ops._abs(x, name=name)
def pow(x, y, name=None):
  """Computes the power of one value to another.
  Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for
  corresponding elements in `x` and `y`. For example:
  ```
  # tensor 'x' is [[2, 2], [3, 3]]
  # tensor 'y' is [[8, 16], [2, 3]]
  tf.pow(x, y) ==> [[256, 65536], [9, 27]]
 | 
	tensorflow.python.ops.gen_math_ops.complex_abs | 58 | 
| 
	from tensorflow.python.framework import ops
    if not inputs or not isinstance(inputs, (list, tuple)):
      raise ValueError("inputs must be a list of at least one Tensor with the "
                       "same dtype and shape")
    inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
    if not all(isinstance(x, ops.Tensor) for x in inputs):
      raise ValueError("inputs must be a list of at least one Tensor with the "
 | 
	tensorflow.python.framework.ops.convert_n_to_tensor_or_indexed_slices | 59 | 
| 
	import tensorflow as tf
import numpy as np
import tvm
from tvm import relay
from tvm.contrib import graph_runtime
from tvm.relay.testing.config import ctx_list
import keras
import tensorflow as tf
from tensorflow import keras as tf_keras
# prevent Keras from using up all gpu memory
if tf.executing_eagerly():
    gpus = tf.config.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
else:
    from keras.backend.tensorflow_backend import set_session
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.5
    set_session(tf.Session(config=config))
def pytest_generate_tests(metafunc):
 | 
	tensorflow.config.list_physical_devices | 60 | 
| 
	import tensorflow as tf
  rl_advantage = rl_reward - rl_baseline
  rl_empirical_loss = -tf.stop_gradient(rl_advantage) * log_prob
  rl_entropy_loss = -rl_entropy_regularization * rl_entropy
  enable_rl_optimizer = tf.cast(
      tf.greater_equal(target_global_step, FLAGS.first_pretrain_steps),
      tf.float32)
  rl_learning_rate = FLAGS.rl_learning_rate * enable_rl_optimizer
  rl_learning_rate = tf.train.piecewise_constant(
      target_global_step, [800,],
      [rl_learning_rate, rl_learning_rate * 0.1])
  optimizer = tf.train.AdamOptimizer(rl_learning_rate)
  target_train_op = optimizer.minimize(
      rl_empirical_loss,
      target_global_step,
      var_list=tf.trainable_variables(rl_scope.name))
 | 
	tensorflow.train.piecewise_constant | 61 | 
| 
	import tensorflow as tf
            tf.add_to_collection(self._initial_state_name, state_tuple.c)
            tf.add_to_collection(self._initial_state_name, state_tuple.h)
        for state_tuple in self._final_state:
            tf.add_to_collection(self._final_state_name, state_tuple.c)
            tf.add_to_collection(self._final_state_name, state_tuple.h)
    def import_state_tuples(self, state_tuples, name, num_replicas):
        restored = []
        for i in range(len(state_tuples) * num_replicas):
            c = tf.get_collection_ref(name)[2 * i + 0]
            h = tf.get_collection_ref(name)[2 * i + 1]
            restored.append(tf.contrib.rnn.LSTMStateTuple(c, h))
        return tuple(restored)
    def import_ops(self):
        if self._is_training:
            self._train_op = tf.get_collection_ref('train_op')[0]
            self._lr = tf.get_collection_ref('lr')[0]
            self._new_lr = tf.get_collection_ref('new_lr')[0]
            self._lr_update = tf.get_collection_ref('lr_update')[0]
            rnn_params = tf.get_collection_ref('rnn_params')
            if self._cell and rnn_params:
 | 
	tensorflow.contrib.rnn.LSTMStateTuple | 62 | 
| 
	import tensorflow as tf
        # optimizer & gradients
        optimizer_base = tf.train.MomentumOptimizer(lrn_rate, FLAGS.momentum)
        if not FLAGS.enbl_multi_gpu:
          optimizer = optimizer_base
        else:
          optimizer = mgw.DistributedOptimizer(optimizer_base)
        grads_origin = optimizer.compute_gradients(loss, self.trainable_vars)
        grads_pruned = self.__calc_grads_pruned(grads_origin)
      # TF operations & model saver
      self.sess_train = sess
      with tf.control_dependencies(self.update_ops):
        self.train_op = optimizer.apply_gradients(grads_pruned, global_step=self.global_step)
      self.summary_op = tf.summary.merge_all()
      self.log_op = [lrn_rate, loss, pr_trainable, pr_maskable] + list(metrics.values())
      self.log_op_names = ['lr', 'loss', 'pr_trn', 'pr_msk'] + list(metrics.keys())
      self.init_op = tf.variables_initializer(self.vars)
      self.init_opt_op = tf.variables_initializer(optimizer_base.variables())
      if FLAGS.enbl_multi_gpu:
        self.bcast_op = mgw.broadcast_global_variables(0)
      self.saver_train = tf.train.Saver(self.vars)
  def __build_eval(self):
    """Build the evaluation graph."""
    with tf.Graph().as_default():
      # create a TF session for the current graph
 | 
	tensorflow.summary.merge_all | 63 | 
| 
	import tensorflow as tf
        self.Z = tf.placeholder(tf.float32, (None, None, fourier_window_size // 2 + 1))
        batch_size = tf.shape(self.X)[0]
        seq_lens = tf.count_nonzero(tf.reduce_sum(self.decoder_inputs, -1), 1, dtype=tf.int32) + 1
        def cells(reuse=False):
            return tf.contrib.rnn.DropoutWrapper(
                tf.nn.rnn_cell.LSTMCell(
                    size_layers, initializer=tf.orthogonal_initializer(), reuse=reuse
                ),
                state_keep_prob=dropout,
                output_keep_prob=dropout,
            )
        def attention(encoder_out, seq_len, reuse=False):
            attention_mechanism = tf.contrib.seq2seq.LuongAttention(
                num_units=size_layers, memory=encoder_out, memory_sequence_length=seq_len
            )
            return tf.contrib.seq2seq.AttentionWrapper(
                cell=tf.nn.rnn_cell.MultiRNNCell([cells(reuse) for _ in range(num_layers)]),
                attention_mechanism=attention_mechanism,
                attention_layer_size=size_layers,
                alignment_history=True,
            )
        encoder_cells = tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)])
        encoder_out, encoder_state = tf.nn.dynamic_rnn(
            cell=encoder_cells, inputs=forward, sequence_length=seq_lens, dtype=tf.float32
        )
 | 
	tensorflow.contrib.seq2seq.LuongAttention | 64 | 
| 
	import tensorflow as tf
    # train the model using Adam
    def train(self, sess, generator,
              learning_rate=.001, training_iters=50000,
              batch_size=64, display_step=10,weight_save_step=100, save_weights_path= None,
              generator_function= None, training_weights_path = None):
        # train with gradient clipping
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        grads = optimizer.compute_gradients(self.loss)
        clipped_grads = [(tf.clip_by_norm(grad, 1.0), var)
                         if grad is not None else (grad, var)
                        for grad, var in grads]
        # add vanishing gradient regularizer
        #out, test = self.dOmega_dWrec()
        #clipped_grads[0] = (tf.add(out[0], clipped_grads[0][0]), clipped_grads[0][1])
        #clipped_grads[0] = (tf.Print(clipped_grads[0][0], [clipped_grads[0][0]], "gw_rec"), clipped_grads[0][1])
        optimize = optimizer.apply_gradients(clipped_grads)
 | 
	tensorflow.clip_by_norm | 65 | 
| 
	from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
        gradient_multipliers=(
            dnn_linear_combined._extract_embedding_lr_multipliers(  # pylint: disable=protected-access
 | 
	tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined._extract_embedding_lr_multipliers | 66 | 
| 
	import tensorflow as tf
            # strides = np.asarray(self.pool_strides)
            # strides[1:] *= len(self.ff_conv_k)
            # kernels = np.asarray(self.pooling_kernel)
            # kernels[1:] *= len(self.ff_conv_k)
            # return tf.layers.conv3d_transpose(
            #     inputs=x,
            #     strides=strides,
            #     padding=self.padding,
            #     filters=y_size[-1],
            #     kernel_size=kernels,
            #     trainable=self.train,
            #     use_bias=use_bias,
            #     activation=self.ff_nl)
            resized = tf.nn.conv3d_transpose(
                value=x,
                filter=kernel,
                output_shape=y_size,
                strides=[1] + strides + [1],
                padding=self.padding,
                name='resize_x_to_y')
            resized = tf.nn.bias_add(
                resized,
                bias)
            resized = self.ff_nl(resized)
            return resized
        elif mode == 'replicate_n_transpose':
 | 
	tensorflow.nn.conv3d_transpose | 67 | 
| 
	import tensorflow as tf
                      env_floor=0) for i in range(N_WORKER)]
    # Observer
    # workers.append(Worker(envpath='./ObstacleTower/obstacletower.exe',
    #                       wid=N_WORKER + 1,
    #                       retro=False,
    #                       realtime_mode=True,
    #                       env_seed=0,
    #                       env_floor=0))
    GLOBAL_UPDATE_COUNTER, GLOBAL_EP = 0, 0
    GLOBAL_RUNNING_R = []
    COORD = tf.train.Coordinator()
    # Declare shared memory
    QUEUE = queue.Queue()
    threads = []
    for worker in workers:  # worker threads
        t = threading.Thread(target=worker.work, args=())
        t.start()  # training
        threads.append(t)
    # Create the model-update thread
    threads.append(threading.Thread(target=GLOBAL_KPRUN.update, ))
    threads[-1].start()
    COORD.join(threads)
 | 
	tensorflow.train.Coordinator | 68 | 
| 
	import tensorflow as tf
          total_loss, learning_rate, num_train_steps, num_warmup_steps,
          use_tpu, optimizer)
      output_spec = contrib_tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.EVAL:
      if task_name not in ["sts-b", "cola"]:
        def metric_fn(per_example_loss, label_ids, logits, is_real_example):
          predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
          accuracy = tf.metrics.accuracy(
              labels=label_ids, predictions=predictions,
              weights=is_real_example)
          loss = tf.metrics.mean(
              values=per_example_loss, weights=is_real_example)
          return {
              "eval_accuracy": accuracy,
              "eval_loss": loss,
          }
      elif task_name == "sts-b":
        def metric_fn(per_example_loss, label_ids, logits, is_real_example):
          """Compute Pearson correlations for STS-B."""
          # Display labels and predictions
          concat1 = contrib_metrics.streaming_concat(logits)
          concat2 = contrib_metrics.streaming_concat(label_ids)
          # Compute Pearson correlation
          pearson = contrib_metrics.streaming_pearson_correlation(
 | 
	tensorflow.metrics.mean | 69 | 
| 
	import tensorflow as tf
    shape = control_flow_ops.with_dependencies([rank_assertions[i]],
                                               tf.shape(image))
 | 
	tensorflow.shape | 70 | 
| 
	import tensorflow as tf
def clip_logits(logits, config):
  logits_clip = getattr(config, "logits_clip", 0.)
  if logits_clip > 0:
    min_logit = tf.reduce_min(logits)
    return tf.minimum(logits - min_logit, logits_clip)
  else:
 | 
	tensorflow.reduce_min | 71 | 
| 
	import tensorflow as tf
    loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small))
    loss = tf.reduce_mean(loss)
    return loss
def contra_step_lossV3(pred, tgt, margin=1.0):
    # Step-wise contrastive loss
    pred1, pred2 = tf.split(pred, 2, axis=0)
    tgt1, tgt2 = tf.split(tgt, 2, axis=0)
    geq = tf.cast((tgt1 - tgt2) > 0, tf.bool)
    tgt_larg = tf.where(geq, tgt1, tgt2)
    tgt_small = tf.where(geq, tgt2, tgt1)
    pred_larg = tf.where(geq, pred1, pred2)
    pred_small = tf.where(geq, pred2, pred1)
    loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small) + margin)
    loss = tf.reduce_mean(loss)
    return loss
def contra_step_lossV4(pred, tgt):
    # 50*50
    # Step-wise contrastive loss
    even = [2 * i for i in range(25)]
    odd = [2 * i + 1 for i in range(25)]
    pred1 = tf.gather(pred, even)
    pred2 = tf.gather(pred, odd)
    tgt1 = tf.gather(tgt, even)
 | 
	tensorflow.where | 72 | 
| 
	from tensorflow.python.training import training_ops
  def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
    """Construct a new gradient descent optimizer.
    Args:
      learning_rate: A Tensor or a floating point value.  The learning
        rate to use.
      use_locking: If True use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "GradientDescent".
    """
    super(GradientDescentOptimizer, self).__init__(use_locking, name)
    self._learning_rate = learning_rate
  def _apply_dense(self, grad, var):
    return training_ops.apply_gradient_descent(
        var,
        self._learning_rate_tensor,
        grad,
        use_locking=self._use_locking).op
  def _apply_sparse(self, grad, var):
    delta = ops.IndexedSlices(grad.values * self._learning_rate_tensor,
                              grad.indices, grad.dense_shape)
    return var.scatter_sub(delta, use_locking=self._use_locking)
  def _prepare(self):
    self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,
                                                       name="learning_rate")
 | 
	tensorflow.python.training.training_ops.apply_gradient_descent | 73 | 
| 
	from tensorflow.python.client import device_lib
def main(_):
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")
  gpus = [
      x.name for x in device_lib.list_local_devices() if x.device_type == "GPU"
  ]
  if FLAGS.num_gpus > len(gpus):
    raise ValueError(
        "Your machine has only %d gpus "
        "which is less than the requested --num_gpus=%d."
 | 
	tensorflow.python.client.device_lib.list_local_devices | 74 | 
| 
	import tensorflow as tf
        try:
            if not tf.io.gfile.exists(a.crop_dir):
                tf.io.gfile.makedirs(a.crop_dir)
        except Exception as e:
 | 
	tensorflow.io.gfile.makedirs | 75 | 
| 
	import tensorflow as tf
            # execute at test time
            return tf.nn.batch_normalization(x, pop_mean, pop_var, beta, gamma, epsilon)
        return tf.cond(train, func1, func2)
def average_gradients(tower_grads):
 | 
	tensorflow.cond | 76 | 
| 
	from tensorflow.python.ops import nn_ops
        w_c: [1,1, attention_vec_size]
        coverage: [batch_size, passage_len]
        '''
        with variable_scope.variable_scope("Attention"):
            # Equation (11) in the paper
            state_features = linear(decoder_state, attention_vec_size, True) # [batch_size, attention_vec_size]
            state_features = tf.expand_dims(state_features, 1) # [batch_size, 1, attention_vec_size]
            all_features = encoder_features + state_features # [batch_size,passage_len,attention_vec_size]
            if use_coverage and coverage is not None:
                coverage_features = tf.expand_dims(coverage, axis=-1) * w_c # [batch_size, passage_len, attention_vec_size]
                all_features += coverage_features
            e = tf.reduce_sum(v * tf.tanh(all_features), axis=-1) # [batch_size, passage_len]
            attn_dist = nn_ops.softmax(e) # [batch_size, passage_len]
            attn_dist *= passage_mask
            if coverage is not None: # Update coverage vector
                coverage += attn_dist
            else: # first step of training
                coverage = attn_dist
            # Calculate the context vector from attn_dist and encoder_states
            # shape (batch_size, attn_size).
            context_vector = tf.reduce_sum(tf.expand_dims(attn_dist, axis=-1) * encoder_states, axis=1) # [batch_size, encoder_dim]
        return context_vector, attn_dist, coverage
 | 
	tensorflow.python.ops.nn_ops.softmax | 77 | 
| 
	from tensorflow.python.ops import math_ops
  def _log_prob(self, x):
    x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
                                           self.validate_args else [], x)
    return (self.alpha * math_ops.log(self.beta) -
            math_ops.lgamma(self.alpha) -
            (self.alpha + 1.) * math_ops.log(x) - self.beta / x)
  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))
 | 
	tensorflow.python.ops.math_ops.log | 78 | 
| 
	import tensorflow as tf
                'fast_rcnn_box_loss', tf.reduce_mean(fast_rcnn_box_loss),
                step=global_step)
            if params['include_mask']:
              tf.contrib.summary.scalar(
                  'mask_loss', tf.reduce_mean(mask_loss), step=global_step)
            tf.contrib.summary.scalar(
                'learning_rate', tf.reduce_mean(learning_rate),
                step=global_step)
            return tf.contrib.summary.all_summary_ops()
      # To log the loss, current learning rate, and epoch for Tensorboard, the
      # summary op needs to be run on the host CPU via host_call. host_call
      # expects [batch_size, ...] Tensors, thus reshape to introduce a batch
      # dimension. These Tensors are implicitly concatenated to
      # [params['batch_size']].
      global_step_t = tf.reshape(global_step, [1])
      total_loss_t = tf.reshape(total_loss, [1])
 | 
	tensorflow.contrib.summary.all_summary_ops | 79 | 
| 
	from tensorflow.python.framework import tensor_util
  if input_shape.ndims is None:
    return [tensor_shape.unknown_shape()]
  elif input_shape.ndims <= 1:
    return [tensor_shape.scalar()]
  dimension = tensor_util.ConstantValue(op.inputs[1])
  if dimension is None:
    return [tensor_shape.unknown_shape(ndims=input_shape.ndims - 1)]
  elif 0 <= dimension and dimension < input_shape.ndims:
    returned_shape = []
 | 
	tensorflow.python.framework.tensor_util.ConstantValue | 80 | 
| 
	import tensorflow as tf
        out = tf.matmul(l1, self.w2)+self.b2
        return out
    def test_inference(self,images):
        images=tf.cast(images,tf.float32)/255.0
        l1 = tf.matmul(images, self.w1)+self.b1
        l1=tf.nn.relu(l1)
        out = tf.matmul(l1, self.w2)+self.b2
 | 
	tensorflow.cast | 81 | 
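A minimal sketch of the cast-and-rescale step from the row above, with illustrative pixel values:

```python
import tensorflow as tf

images = tf.constant([[0, 128, 255]], dtype=tf.uint8)   # illustrative pixels
scaled = tf.cast(images, tf.float32) / 255.0            # uint8 -> float32 in [0, 1]
print(scaled.numpy())                                   # [[0.  0.5019608  1. ]]
```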
| 
	import tensorflow as tf
                          cols[3] / height,
                          cols[2] / width], axis=1)
        # add batch dimension (assume batch_size==1)
        #assert image.get_shape()[0] == 1
        boxes = tf.expand_dims(boxes, dim=0)
        image = tf.image.draw_bounding_boxes(image, boxes)   # draw the ground-truth boxes on the image
        return tf.summary.image('ground_truth', image)
    def _add_act_summary(self, tensor):
        tf.summary.histogram('ACT/' + tensor.op.name + '/activations', tensor)
        tf.summary.scalar('ACT/' + tensor.op.name + '/zero_fraction',
                          tf.nn.zero_fraction(tensor))
    def _add_score_summary(self, key, tensor):
        tf.summary.histogram('SCORE/' + tensor.op.name + '/' + key + '/scores', tensor)
    def _add_train_summary(self, var):
        tf.summary.histogram('TRAIN/' + var.op.name, var)
    # Custom Layers #
    def _reshape_layer(self, bottom, num_dim, name):
        input_shape = tf.shape(bottom)
        with tf.variable_scope(name):
 | 
	tensorflow.nn.zero_fraction | 82 | 
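A short sketch of the activation-summary metric used above; `tf.math.zero_fraction` is the TF 2.x name of `tf.nn.zero_fraction`, and the values are illustrative.

```python
import tensorflow as tf

acts = tf.constant([0.0, 0.0, 1.5, 3.0])
# Fraction of entries that are exactly zero -- useful for spotting dead ReLUs.
print(tf.math.zero_fraction(acts).numpy())  # 0.5
```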
| 
	import tensorflow as tf
        hparams["type"] = "natural_exp_decay"
        hparams["kwargs"] = {
            "decay_steps": 1,
            "decay_rate": 0.5
        }
        ned_lr_decay_fn = opt.get_learning_rate_decay_fn(hparams)
        ned_lr = ned_lr_decay_fn(learning_rate=1., global_step=global_step)
        ned_lr_true = tf.train.natural_exp_decay(
            1., global_step-hparams["start_decay_step"],
            hparams["kwargs"]["decay_steps"], hparams["kwargs"]["decay_rate"])
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            pc_lr_, pc_lr_true_, ned_lr_, ned_lr_true_ = sess.run(
 | 
	tensorflow.train.natural_exp_decay | 83 | 
| 
	from tensorflow.python.training import summary_io
    # TODO(mdan): This line looks redundant.
    if self._summary_writer is None:
      self._summary_writer = summary_io.SummaryWriter(estimator.model_dir)
 | 
	tensorflow.python.training.summary_io.SummaryWriter | 84 | 
| 
	import tensorflow as tf
    centroids_mask = None
    centroids, lookup = get_unique(weights)
    num_centroids = tf.size(centroids)
    if self.preserve_sparsity:
      sparsity_mask = tf.math.divide_no_nan(weights, weights)
      zero_idx = tf.argmin(tf.abs(centroids), axis=-1)
      centroids_mask = 1.0 - tf.one_hot(zero_idx, num_centroids)
      result = {SPARSITY_MASK: sparsity_mask}
 | 
	tensorflow.math.divide_no_nan | 85 | 
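A minimal sketch of the sparsity-mask trick from the row above, with made-up weights: `divide_no_nan` turns 0/0 into 0 instead of NaN, so `w / w` yields a {0, 1} mask.

```python
import tensorflow as tf

w = tf.constant([0.0, -2.5, 0.0, 4.0])
print(tf.math.divide_no_nan(w, w).numpy())  # [0. 1. 0. 1.]
```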
| 
	from tensorflow.python.ops import array_ops
      array_ops.expand_dims(array_ops.constant(thresholds), [1]),
      array_ops.pack([1, num_predictions]))
  # Tile the predictions after thresholding them across different thresholds.
  pred_is_pos = math_ops.greater(
      array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
      thresh_tiled)
  pred_is_neg = math_ops.logical_not(pred_is_pos)
  # Tile labels by number of thresholds
  label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
  label_is_neg = math_ops.logical_not(label_is_pos)
  true_positives = _create_local('true_positives', shape=[num_thresholds])
  false_negatives = _create_local('false_negatives', shape=[num_thresholds])
  true_negatives = _create_local('true_negatives', shape=[num_thresholds])
  false_positives = _create_local('false_positives', shape=[num_thresholds])
  is_true_positive = math_ops.to_float(
      math_ops.logical_and(label_is_pos, pred_is_pos))
 | 
	tensorflow.python.ops.array_ops.tile | 86 | 
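A hedged sketch of the tiling step above using the public alias `tf.tile`; the threshold values and batch size are illustrative.

```python
import tensorflow as tf

thresholds = tf.constant([[0.25, 0.5, 0.75]])   # shape [1, 3]
tiled = tf.tile(thresholds, [4, 1])             # repeat across 4 rows -> [4, 3]
print(tiled.shape)  # (4, 3)
```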
| 
	import tensorflow as tf
    
    def _create_model(self, train_triples):
        # Count unique items to determine embedding matrix sizes
        entity_cnt = len(set(train_triples[:,0]).union(train_triples[:,2]))
        rel_cnt = len(set(train_triples[:,1]))
        init_sd = 1.0 / np.sqrt(self.embedding_size)
        # Embedding variables
        entity_var_shape = [entity_cnt, self.embedding_size]
        rel_var_shape = [rel_cnt, self.embedding_size]
        entity_init  = tf.truncated_normal(entity_var_shape, stddev=init_sd)
        rel_init = tf.truncated_normal(rel_var_shape, stddev=init_sd)
        # Ensure maxnorm constraints are initially satisfied
        entity_init = dense_maxnorm(entity_init, self.maxnorm)
        self.entity_embedding_vars = tf.Variable(entity_init)
        self.rel_embedding_vars = tf.Variable(rel_init)
        # Embedding layer for each (head, rel, tail) triple being fed in as input
        head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input)
        tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input)
        rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input)
        # Relationship vector acts as a translation in entity embedding space
 | 
	tensorflow.truncated_normal | 87 | 
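A minimal sketch of the embedding initialization above; `tf.random.truncated_normal` is the TF 2.x spelling of `tf.truncated_normal`, and the entity count and embedding size are assumptions.

```python
import tensorflow as tf
import numpy as np

embedding_size = 32                       # illustrative
init_sd = 1.0 / np.sqrt(embedding_size)
entity_init = tf.random.truncated_normal([100, embedding_size], stddev=init_sd)
entity_embedding_vars = tf.Variable(entity_init)
print(entity_embedding_vars.shape)        # (100, 32)
```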
| 
	from tensorflow.contrib.eager.python.examples.spinn import data
        logdir=os.path.join(self._temp_data_dir, "logdir"),
        inference_sentences=("( foo ( bar . ) )", None))
    with self.assertRaises(ValueError):
      spinn.train_or_infer_spinn(embed, word2index, None, None, None, config)
  def testTrainSpinn(self):
    """Test with fake toy SNLI data and GloVe vectors."""
    # 1. Create and load a fake SNLI data file and a fake GloVe embedding file.
    snli_1_0_dir = os.path.join(self._temp_data_dir, "snli/snli_1.0")
    fake_train_file = self._create_test_data(snli_1_0_dir)
    vocab = data.load_vocabulary(self._temp_data_dir)
    word2index, embed = data.load_word_vectors(self._temp_data_dir, vocab)
    train_data = data.SnliData(fake_train_file, word2index)
    dev_data = data.SnliData(fake_train_file, word2index)
    test_data = data.SnliData(fake_train_file, word2index)
    # 2. Create a fake config.
    config = _test_spinn_config(
        data.WORD_VECTOR_LEN, 4,
        logdir=os.path.join(self._temp_data_dir, "logdir"))
    # 3. Test training of a SPINN model.
    trainer = spinn.train_or_infer_spinn(
 | 
	tensorflow.contrib.eager.python.examples.spinn.data.load_word_vectors | 88 | 
| 
	import tensorflow as tf
        name='logits_rl_w',
        initializer=tf.initializers.zeros(),
 | 
	tensorflow.initializers.zeros | 89 | 
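A small sketch of using a zeros initializer to create a variable like the one in the row above; `tf.zeros_initializer()` is the stable public alias, and the shape is a made-up example.

```python
import tensorflow as tf

init = tf.zeros_initializer()
# An initializer instance is called with a shape (and optional dtype).
logits_rl_w = tf.Variable(init(shape=[3, 2]), name="logits_rl_w")
print(logits_rl_w.numpy())
```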
| 
	from tensorflow.contrib.framework import deprecated
def _at_k_name(name, k=None, class_id=None):
  if k is not None:
    name = '%s_at_%d' % (name, k)
  else:
    name = '%s_at_k' % (name)
  if class_id is not None:
    name = '%s_class%d' % (name, class_id)
  return name
@deprecated('2016-11-08', 'Please use `streaming_sparse_recall_at_k`, '
            'and reshape labels from [batch_size] to [batch_size, 1].')
@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')
def streaming_recall_at_k(predictions, labels, k, ignore_mask=None,
                          weights=None, metrics_collections=None,
                          updates_collections=None, name=None):
  """Computes the recall@k of the predictions with respect to dense labels.
  The `streaming_recall_at_k` function creates two local variables, `total` and
  `count`, that are used to compute the recall@k frequency. This frequency is
  ultimately returned as `recall_at_<k>`: an idempotent operation that simply
 | 
	tensorflow.contrib.framework.deprecated | 90 | 
| 
	import tensorflow as tf
        tf.logging.info("removing {}".format(src_ckpt))
        tf.gfile.Remove(src_ckpt)
 | 
	tensorflow.gfile.Remove | 91 | 
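A minimal sketch of the same delete-checkpoint step; `tf.io.gfile.remove` is the TF 2.x spelling of `tf.gfile.Remove`, and the path is hypothetical.

```python
import tensorflow as tf

src_ckpt = "/tmp/stale_checkpoint.index"   # hypothetical path
if tf.io.gfile.exists(src_ckpt):
    tf.io.gfile.remove(src_ckpt)
```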
| 
	from tensorflow.python.platform import tf_logging as logging
  def every_n_step_begin(self, step):
    super(NanLoss, self).every_n_step_begin(step)
    return [self._loss_tensor]
  def every_n_step_end(self, step, outputs):
    super(NanLoss, self).every_n_step_end(step, outputs)
    if np.isnan(_extract_output(outputs, self._loss_tensor)):
      failure_message = "Model diverged with loss = NaN."
      if self._fail_on_nan_loss:
        logging.error(failure_message)
        raise NanLossDuringTrainingError
      else:
        logging.warning(failure_message)
        # We don't raise an error but we return "should stop" so we stop, but
        # without an exception.
        return True
class RunHookAdapterForMonitors(session_run_hook.SessionRunHook):
 | 
	tensorflow.python.platform.tf_logging.error | 92 | 
| 
	import tensorflow as tf
    save_dir = self._TestDir("abs_paths")
    abs_path = os.path.join(save_dir, "model-0")
    ckpt = tf.train.generate_checkpoint_state_proto(save_dir, abs_path)
    self.assertEqual(ckpt.model_checkpoint_path, abs_path)
    self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
    self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
    self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)
  def testRelPath(self):
    train_dir = "train"
    model = os.path.join(train_dir, "model-0")
    # model_checkpoint_path should have no "train" directory part.
    new_rel_path = "model-0"
    ckpt = tf.train.generate_checkpoint_state_proto(train_dir, model)
    self.assertEqual(ckpt.model_checkpoint_path, new_rel_path)
    self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
    self.assertEqual(ckpt.all_model_checkpoint_paths[-1], new_rel_path)
  def testAllModelCheckpointPaths(self):
    save_dir = self._TestDir("all_models_test")
    abs_path = os.path.join(save_dir, "model-0")
    for paths in [None, [], ["model-2"]]:
      ckpt = tf.train.generate_checkpoint_state_proto(
          save_dir,
          abs_path,
          all_model_checkpoint_paths=paths)
 | 
	tensorflow.train.generate_checkpoint_state_proto | 93 | 
| 
	import tensorflow as tf
            self.mu = self.mu * action_bound[1];
            self.sigma = self.sigma + 1e-4
            # get action from distribution
            self.normal_dist = tf.contrib.distributions.Normal(self.mu, self.sigma)
            self.action = tf.squeeze(self.normal_dist.sample(1),axis=0);
            self.action = tf.clip_by_value(self.action, action_bound[0], action_bound[1])
            
            # Loss and train op
            self.loss = -self.normal_dist.log_prob(self.a_his) * self.target
            # Add cross entropy cost to encourage exploration
            self.loss -= entropy_beta * self.normal_dist.entropy()
            self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            self.grads_and_vars = self.optimizer.compute_gradients(self.loss)
            self.grads=[];
            self.vars=[];
            for i in range(len(self.grads_and_vars)):
              self.grads.append(self.grads_and_vars[i][0]);
              self.vars.append(self.grads_and_vars[i][1]);
            self.grads=self.grads[-1*NUM_VARS:];
            self.vars=self.vars[-1*NUM_VARS:];
            self.train_op = self.optimizer.apply_gradients(
                self.grads_and_vars, global_step=tf.contrib.framework.get_global_step())
             
 | 
	tensorflow.train.AdamOptimizer | 94 | 
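A hedged, graph-mode sketch of the optimizer setup above under `tf.compat.v1` (the variable, loss, and learning rate are invented): `minimize()` bundles the `compute_gradients` / `apply_gradients` pair used in the row.

```python
import tensorflow as tf

tf.compat.v1.disable_eager_execution()
x = tf.compat.v1.get_variable("x", initializer=3.0)
loss = tf.square(x - 1.0)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss)
# Running train_op inside a tf.compat.v1.Session takes one Adam step.
```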
| 
	import tensorflow as tf
      self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
      self.assertEqual(
          len(ckpt.all_model_checkpoint_paths), len(paths) if paths else 1)
      self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)
  def testUpdateCheckpointState(self):
    save_dir = self._TestDir("update_checkpoint_state")
    os.chdir(save_dir)
    # Make a temporary train directory.
    train_dir = "train"
    os.mkdir(train_dir)
    abs_path = os.path.join(save_dir, "model-0")
    rel_path = "train/model-2"
    tf.train.update_checkpoint_state(
        train_dir,
        rel_path,
        all_model_checkpoint_paths=[abs_path, rel_path])
    ckpt = tf.train.get_checkpoint_state(train_dir)
    self.assertEqual(ckpt.model_checkpoint_path, rel_path)
    self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
    self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path)
    self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path)
class MetaGraphTest(tf.test.TestCase):
 | 
	tensorflow.train.update_checkpoint_state | 95 | 
| 
	import tensorflow as tf
    self._train_op = optimizer.apply_gradients(
        zip(grads, tvars),
        global_step=tf.contrib.framework.get_or_create_global_step())
 | 
	tensorflow.contrib.framework.get_or_create_global_step | 96 | 
| 
	import tensorflow as tf
  Returns
  -------
  A tensor.
  """
  if axis < 0:
    dims = get_ndim(tensors[0])
    if dims:
      axis = axis % dims
    else:
      axis = 0
  try:
    return tf.concat_v2([x for x in tensors], axis)
  except AttributeError:
    return tf.concat(axis=axis, values=[x for x in tensors])
def _normalize_axis(axis, ndim):
  if isinstance(axis, tuple):
    axis = list(axis)
  if isinstance(axis, list):
    for i, a in enumerate(axis):
      if a is not None and a < 0:
        axis[i] = a % ndim
 | 
	tensorflow.concat_v2 | 97 | 
| 
	from tensorflow.contrib.layers.python.layers import utils
    def build_no_ops():
      return (tf.no_op(), tf.no_op())
    # Only make the ops if we know that `is_training=True`, or the value of
    # `is_training` is unknown.
    is_training_const = utils.constant_value(is_training)
    if is_training_const is None or is_training_const:
      update_mean_op, update_second_moment_op = utils.smart_cond(
          is_training,
          build_update_ops,
          build_no_ops,
 | 
	tensorflow.contrib.layers.python.layers.utils.constant_value | 98 | 
| 
	from tensorflow.python.framework import ops
  default_name = _at_k_name('false_negative', k, class_id=class_id)
  with ops.name_scope(name, default_name, (predictions_idx, labels)) as scope:
 | 
	tensorflow.python.framework.ops.name_scope | 99 | 