Columns: seed (string, lengths 25–1.88k) · seed_api (string, lengths 14–102) · index (int64, 0–1.05k)
import tensorflow as tf

        transpose=transpose)
    if w_project is not None:
        x = tf.conv2d(x, w_project, strides, padding='SAME')
    # Set shape for BN in the residual function.
tensorflow.conv2d
700
from tensorflow.python.ops import array_ops

    if proba:
      raise ValueError(
          "logits to probabilities is not supported for "
          "_BinarySvmTargetColumn")
    logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)
    return math_ops.argmax(logits, 1)


# TODO(zakaria): use contrib losses.
def _mean_squared_loss(logits, target):
  # To prevent broadcasting inside "-".
  if len(target.get_shape()) == 1:
    target = array_ops.expand_dims(target, dim=[1])
  logits.get_shape().assert_is_compatible_with(target.get_shape())
  return math_ops.square(logits - math_ops.to_float(target))


def _log_loss_with_two_classes(logits, target):
  # sigmoid_cross_entropy_with_logits requires [batch_size, 1] target.
  if len(target.get_shape()) == 1:
    target = array_ops.expand_dims(target, dim=[1])
  loss_vec = nn.sigmoid_cross_entropy_with_logits(
      labels=math_ops.to_float(target), logits=logits)
tensorflow.python.ops.array_ops.expand_dims
701
from tensorflow.contrib.learn.python.learn.estimators import composable_model

    """
    super(_DNNLinearCombinedBaseEstimator, self).__init__(
        model_dir=model_dir, config=config)

    num_ps_replicas = config.num_ps_replicas if config else 0

    self._linear_model = composable_model.LinearComposableModel(
        num_label_columns=target_column.num_label_columns,
        optimizer=linear_optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas)
tensorflow.contrib.learn.python.learn.estimators.composable_model.LinearComposableModel
702
from tensorflow.python.ops import rnn_cell

  seq_length = config["seq_length"]

  with ops.Graph().as_default(), ops.device("/device:GPU:0"):
    inputs = seq_length * [
        array_ops.zeros([batch_size, num_units], dtypes.float32)
    ]
    initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)

    cell = rnn_cell.LSTMCell(
        num_units=num_units, initializer=initializer, state_is_tuple=True)
    multi_cell = rnn_cell.MultiRNNCell(
        [cell() for _ in range(num_layers)])
    outputs, final_state = core_rnn.static_rnn(
        multi_cell, inputs, dtype=dtypes.float32)
    trainable_variables = ops.get_collection(
        ops.GraphKeys.TRAINABLE_VARIABLES)
tensorflow.python.ops.rnn_cell.LSTMCell
703
import tensorflow as tf

        cast0 = tf.dtypes.as_string(add if not swap else sub, name="TOSTR0")
    else:
        cast0 = tf.cast(add if not swap else sub, tf_output0_dtype, "CAST0")
    if tf_output1_dtype == tf.string:
        cast1 = tf.dtypes.as_string(sub if not swap else add, name="TOSTR1")
    else:
        cast1 = tf.cast(sub if not swap else add, tf_output1_dtype, "CAST1")
    out0 = tf.identity(cast0, "OUTPUT0")
tensorflow.dtypes.as_string
704
from tensorflow.contrib.eager.python.examples.linear_regression import linear_regression

    # Perform burn-in.
    linear_regression.fit(model, burn_in_dataset, optimizer)
tensorflow.contrib.eager.python.examples.linear_regression.linear_regression.fit
705
import tensorflow as tf

    if encoder.attn_keep_prob is not None:
        state_noise_shape = [1, tf.shape(state)[1]] if encoder.pervasive_dropout else None
        state = tf.nn.dropout(state, keep_prob=encoder.attn_keep_prob,
                              noise_shape=state_noise_shape)
        hidden_noise_shape = [1, 1, tf.shape(hidden)[2]] if encoder.pervasive_dropout else None
        hidden = tf.nn.dropout(hidden, keep_prob=encoder.attn_keep_prob,
                               noise_shape=hidden_noise_shape)

    if encoder.mult_attn:
        state = dense(state, encoder.attn_size, use_bias=False, name='state')
        hidden = dense(hidden, encoder.attn_size, use_bias=False, name='hidden')
        return tf.einsum('ijk,ik->ij', hidden, state)

    y = dense(state, encoder.attn_size, use_bias=not encoder.layer_norm, name='W_a')
    y = tf.expand_dims(y, axis=1)

    if encoder.layer_norm:
        y = tf.contrib.layers.layer_norm(y, scope='layer_norm_state')
        hidden = tf.contrib.layers.layer_norm(hidden, center=False,
                                              scope='layer_norm_hidden')
tensorflow.einsum
706
import tensorflow as tf

        if options.pointer_gen:
            with tf.variable_scope('calculate_pgen'):
                p_gen = linear([context_t, state_t.c, state_t.h, x], 1, True)  # [batch_size, 1]
                p_gen = tf.sigmoid(p_gen)

        # Concatenate the cell_output (= decoder state) and the context vector,
        # and pass them through a linear layer
tensorflow.sigmoid
707
from tensorflow.core.framework import function_pb2

  Args:
    graph: Graph.
    inputs: List of tensors. Inputs to the function.
    outputs: List of tensors. Outputs of the function.
    out_names: Optional list of string names for the outputs.

  Returns:
    A FunctionDef protocol buffer.

  Raises:
    ValueError: if out_names is specified and the wrong length.
  """
  func = function_pb2.FunctionDef()
  func.signature.name = "_"
  used_names = set()
  func.signature.input_arg.extend(
      [_tensor_to_argdef(i, used_names=used_names) for i in inputs])
  if out_names is None:
    used_names = set()
    func.signature.output_arg.extend(
        [_tensor_to_argdef(o, used_names=used_names) for o in outputs])
  elif len(outputs) != len(out_names):
    raise ValueError(
        "Length of out_names (%d) does not match number of outputs (%d): %s" %
        (len(out_names), len(outputs), ", ".join(out_names)))
tensorflow.core.framework.function_pb2.FunctionDef
708
import tensorflow as tf

        run_config = tf.estimator.RunConfig(
            save_summary_steps=1,
            train_distribute=strategy,
            model_dir=FLAGS.output_dir,
            save_checkpoints_steps=FLAGS.save_ckpt_steps,
            log_step_count_steps=1,
        )
    else:
        distribution = tf.contrib.distribute.MirroredStrategy(
            num_gpus=FLAGS.num_gpus
        )
        run_config = tf.estimator.RunConfig(train_distribute=distribution)

    model_fn = model_fn_builder(
        bert_config=bert_config,
        init_checkpoint=FLAGS.init_checkpoint,
tensorflow.contrib.distribute.MirroredStrategy
709
import tensorflow as tf

        eval_input_fn = None
    '''

    # Add hooks
    train_hooks = [
        tf.train.StopAtStepHook(last_step=params.train_steps),
        tf.train.NanTensorHook(loss),  # Monitors the loss tensor and stops training if loss is NaN
        tf.train.LoggingTensorHook(
            {
                "step": global_step,
                "loss": loss,
tensorflow.train.NanTensorHook
710
from tensorflow.contrib.rnn import BasicLSTMCell, RNNCell, DropoutWrapper, MultiRNNCell

    if encoder.cell_type.lower() == 'lstm':
        cell = CellWrapper(BasicLSTMCell(encoder.cell_size, reuse=reuse))
tensorflow.contrib.rnn.BasicLSTMCell
711
import tensorflow as tf

        argmax = lambda: tf.argmax(output_, 1)
        target = lambda: inputs.read(time + 1)
        softmax = lambda: tf.squeeze(
            tf.multinomial(tf.log(tf.nn.softmax(output_)), num_samples=1),
            axis=1)

        use_target = tf.logical_and(time < time_steps - 1,
                                    tf.random_uniform([]) >= feed_previous)
        predicted_symbol = tf.case([
            (use_target, target),
            (tf.logical_not(feed_argmax), softmax)],
            default=argmax)  # default case is useful for beam-search

        predicted_symbol.set_shape([None])
        predicted_symbol = tf.stop_gradient(predicted_symbol)

        input_ = embed(predicted_symbol)
        pos = update_pos(pos, predicted_symbol, encoder_input_length[align_encoder_id])
tensorflow.logical_not
712
import tensorflow as tf

            kernel_initializer=modeling.create_initializer(
                bert_config.initializer_range))
        input_tensor = modeling.layer_norm(input_tensor)

    # The output weights are the same as the input embeddings, but there is
    # an output-only bias for each token.
    output_bias = tf.get_variable(
        "output_bias",
        shape=[bert_config.vocab_size],
        initializer=tf.zeros_initializer())
    logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    label_ids = tf.reshape(label_ids, [-1])
    label_weights = tf.reshape(label_weights, [-1])

    one_hot_labels = tf.one_hot(
        label_ids, depth=bert_config.vocab_size, dtype=tf.float32)

    # The `positions` tensor might be zero-padded (if the sequence is too
    # short to have the maximum number of predictions). The `label_weights`
tensorflow.nn.bias_add
713
from tensorflow.python import tf2


def _check_tensorflow_version():
  """Check that we're using a compatible TF version.

  Raises a warning if either the Tensorflow version is less than 2.0 or TF 2.x
  is not enabled. If TF 2.x is enabled, but version is < TF 2.3, raises a
  warning to indicate that resources may not be initialized.
  """
  major, minor, _ = tf.version.VERSION.split('.')
  if not (int(major) >= 2 and tf2.enabled()):
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. TransformFeaturesLayer is supported '
        'only for TF 2.x with TF 2.x behaviors enabled and may not work as '
        'intended.', tf.version.VERSION)
  elif int(major) == 2 and int(minor) < 3:
    # TODO(varshaan): Log a more specific warning.
    tf.compat.v1.logging.warning(
        'Tensorflow version (%s) found. TransformFeaturesLayer may not work '
        'as intended if the SavedModel contains an initialization op.',
        tf.version.VERSION)
tensorflow.python.tf2.enabled
714
import tensorflow as tf

    Li_eKuf = tf.matrix_triangular_solve(Luu, eKuf, lower=True)  # M x N
    fmean = tf.matmul(Li_eKuf, q_mu, transpose_a=True)

    eKff = expectation(pXnew, kern)  # N (psi0)
    eKuffu = expectation(pXnew, (kern, feat), (kern, feat))  # N x M x M (psi2)
    Luu_tiled = tf.tile(Luu[None, :, :], [num_data, 1, 1])  # remove this line, once issue 216 is fixed
    Li_eKuffu = tf.matrix_triangular_solve(Luu_tiled, eKuffu, lower=True)
    Li_eKuffu_Lit = tf.matrix_triangular_solve(
        Luu_tiled, tf.matrix_transpose(Li_eKuffu), lower=True)  # N x M x M
    cov = tf.matmul(q_sqrt_r, q_sqrt_r, transpose_b=True)  # D x M x M

    if mean_function is None or isinstance(mean_function, mean_functions.Zero):
        e_related_to_mean = tf.zeros((num_data, num_func, num_func),
                                     dtype=settings.float_type)
    else:
        # Update mean: \mu(x) + m(x)
tensorflow.matrix_transpose
715
from tensorflow.python.ops import math_ops

    if self._n_classes < 2:
      loss_vec = math_ops.square(logits - math_ops.to_float(target))
    elif self._n_classes == 2:
      loss_vec = nn.sigmoid_cross_entropy_with_logits(
          logits, math_ops.to_float(target))
    else:
      loss_vec = nn.sparse_softmax_cross_entropy_with_logits(
          logits, array_ops.reshape(target, [-1]))
tensorflow.python.ops.math_ops.to_float
716
from tensorflow.python.ops.rnn_cell_impl import _Linear

                [inputs, state],
                2 * self._num_units,
                True,
                bias_initializer=bias_ones,
                kernel_initializer=self._kernel_initializer)
        value = math_ops.sigmoid(self._gate_linear([inputs, state]))
        r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)

        r_state = r * state
        if self._candidate_linear is None:
            with vs.variable_scope("candidate"):
                self._candidate_linear = _Linear(
                    [inputs, r_state],
                    self._num_units,
                    True,
                    bias_initializer=self._bias_initializer,
                    kernel_initializer=self._kernel_initializer)
        c = self._activation(self._candidate_linear([inputs, r_state]))
        u = (1.0 - att_score) * u
        new_h = u * state + (1 - u) * c
        return new_h, new_h


def prelu(_x, scope=''):
tensorflow.python.ops.rnn_cell_impl._Linear
717
import tensorflow as tf

            var_shape = var.get_shape().as_list()
            if var_shape == saved_shapes[name]:
                restored.append(var)
            else:
                shape_conflicts.add(name)
        found_names -= shape_conflicts
        return (restored, sorted(found_names),
                sorted(missing_names), sorted(shape_conflicts))

    def load(self, checkpoint_path, flexible_restore=True):
        if tf.gfile.IsDirectory(checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
            if checkpoint_path is None:
                raise ValueError('Checkpoint directory is empty.')
        if flexible_restore:
            var_list, found, missing, conflicts = self._checkpoint_var_search(
                checkpoint_path)
            tf.logging.info('Restoring variables: \n\t{}'.format(
                '\n\t'.join(found)))
            if len(missing) > 0:
tensorflow.gfile.IsDirectory
718
import tensorflow as tf

        # regularizer = tf.contrib.layers.l2_regularizer(0.001)
        # reg = regularizer(embedding_variable)
        # loss += reg

        return loss

    def crf_decode_layer(self, logits, crf_params, nwords):
        with tf.name_scope("CRF_decode"):
            pred_ids, _ = tf.contrib.crf.crf_decode(logits, crf_params, nwords)
        return pred_ids

    def compute_metrics(self, tags, pred_ids, num_tags, indices, nwords):
        weights = tf.sequence_mask(nwords)
        # metrics_correct_rate, golden, predict = correct_rate(tags, pred_ids)
tensorflow.contrib.crf.crf_decode
719
import tensorflow as tf

        if self.has_inputs:
            data_fields["inputs"] = tf.VarLenFeature(tf.int64)

        # hack: ignoring true targets and putting dist_targets in targets
        data_items_to_decoders = {
            "inputs": tf.contrib.slim.tfexample_decoder.Tensor("inputs"),
            "targets": tf.contrib.slim.tfexample_decoder.Tensor("dist_targets"),
        }

        return (data_fields, data_items_to_decoders)

    def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False):
        """Get vocab for distill problems."""
tensorflow.contrib.slim.tfexample_decoder.Tensor
720
from tensorflow.python.ops import math_ops

  count = _create_local('count_tensor', shape=values.get_shape())

  num_values = array_ops.ones_like(values)
  if weights is not None:
    weights = math_ops.to_float(weights)
    values = math_ops.mul(values, weights)
    num_values = math_ops.mul(num_values, weights)

  total_compute_op = state_ops.assign_add(total, values)
  count_compute_op = state_ops.assign_add(count, num_values)

  def compute_mean(total, count, name):
    non_zero_count = math_ops.maximum(count,
                                      array_ops.ones_like(count),
                                      name=name)
    return math_ops.truediv(total, non_zero_count, name=name)

  mean = compute_mean(total, count, 'value')
  with ops.control_dependencies([total_compute_op, count_compute_op]):
    update_op = compute_mean(total, count, 'update_op')

  if metrics_collections:
    ops.add_to_collections(metrics_collections, mean)

  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)

  return mean, update_op
tensorflow.python.ops.math_ops.truediv
721
from tensorflow.python.ops import gen_resource_variable_ops

    return None

  @property
  def op(self):
    return self.get().op

  def _read_variable_op(self):
    if _enclosing_tpu_context() is None:
      return self._primary_var.read_value()
    v = gen_resource_variable_ops.read_variable_op(self.handle, self._dtype)
    return v

  def read_value(self):
    return self._read_variable_op()

  def assign(self, value, use_locking=None, name=None, read_value=False):
    del use_locking
    with _handle_graph(self.handle), self._assign_dependencies():
tensorflow.python.ops.gen_resource_variable_ops.read_variable_op
722
import tensorflow as tf

    # queue.
    image, label = read_and_decode(filename_queue)

    # Shuffle the examples and collect them into batch_size batches.
    # (Internally uses a RandomShuffleQueue.)
    # We run this in two threads to avoid being a bottleneck.
    images, sparse_labels = tf.train.shuffle_batch(
        [image, label], batch_size=batch_size, num_threads=8,
        capacity=1000 + 3 * batch_size,
        # Ensures a minimum amount of shuffling of examples.
        min_after_dequeue=1000)
tensorflow.train.shuffle_batch
723
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('ws_save_path', './models_ws/model.ckpt',
                           'WS: model\'s save path')
tf.app.flags.DEFINE_float('ws_prune_ratio', 0.75, 'WS: target pruning ratio')
tf.app.flags.DEFINE_string('ws_prune_ratio_prtl', 'optimal',
                           'WS: pruning ratio protocol (\'uniform\' | \'heurist\' | \'optimal\')')
tf.app.flags.DEFINE_integer('ws_nb_rlouts', 200,
                            'WS: # of roll-outs for the RL agent')
tf.app.flags.DEFINE_integer('ws_nb_rlouts_min', 50,
                            'WS: minimal # of roll-outs for the RL agent to start training')
tf.app.flags.DEFINE_string('ws_reward_type', 'single-obj',
                           'WS: reward type (\'single-obj\' OR \'multi-obj\')')
tf.app.flags.DEFINE_float('ws_lrn_rate_rg', 3e-2,
                          'WS: learning rate for layerwise regression')
tensorflow.app.flags.DEFINE_integer
724
import tensorflow as tf

    pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()
    norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)

    # Optimizer
    with tf.name_scope('optimizer'):
        if model_str == 'gcn_ae':
            opt = OptimizerAE(preds=model.reconstructions,
                              labels=tf.reshape(
                                  tf.sparse_tensor_to_dense(placeholders['adj_orig'],
                                                            validate_indices=False), [-1]),
                              pos_weight=pos_weight,
                              norm=norm)
        elif model_str == 'gcn_vae':
            opt = OptimizerVAE(preds=model.reconstructions,
                               labels=tf.reshape(
                                   tf.sparse_tensor_to_dense(placeholders['adj_orig'],
                                                             validate_indices=False), [-1]),
tensorflow.sparse_tensor_to_dense
725
import tensorflow as tf

        normalized_grad = old_div(grad, avoid_nan_norm)
    elif ord == 2:
        red_ind = list(range(1, len(x.get_shape())))
        avoid_zero_div = 1e-8
        square = tf.maximum(avoid_zero_div,
                            reduce_sum(tf.square(grad),
                                       reduction_indices=red_ind,
                                       keepdims=True))
        normalized_grad = old_div(grad, tf.sqrt(square))
    else:
        normalized_grad = tf.sign(grad)
        normalized_grad = tf.stop_gradient(normalized_grad)

    scaled_grad = eps * normalized_grad

    # The goal is to make the loss decrease.
    adv_x = x - scaled_grad

    if (clip_min is not None) and (clip_max is not None):
        adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
tensorflow.sign
726
import tensorflow as tf

      cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(24)] * 2,
                                         state_is_tuple=True)
      return tf.nn.seq2seq.embedding_attention_seq2seq(
          enc_inp, dec_inp, cell, num_encoder_symbols=classes,
          num_decoder_symbols=classes, embedding_size=24)

    targets = [dec_inp[i+1] for i in range(len(dec_inp) - 1)] + [0]
    return tf.nn.seq2seq.model_with_buckets(
        enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq,
        per_example_loss=per_example_loss)

  # Now we construct the copy model.
  inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
tensorflow.nn.seq2seq.model_with_buckets
727
import tensorflow as tf

    else:
        q = tf.nn.softmax(q_logits)
        p = tf.nn.softmax(p_logits)
        kl = tf.reduce_sum(q * (tf.log(q) - tf.log(p)), 1)

    num_labels = tf.reduce_sum(weights)
    num_labels = tf.where(tf.equal(num_labels, 0.), 1., num_labels)

    kl.get_shape().assert_has_rank(2)
    weights.get_shape().assert_has_rank(1)

    loss = tf.identity(tf.reduce_sum(tf.expand_dims(weights, -1) * kl) / num_labels,
                       name='kl')
    return loss
tensorflow.equal
728
from tensorflow.python.platform import gfile

        IOError, lambda e: "Cannot parse file"):
      tf.train.import_meta_graph(filename)

    # Deletes the file
    gfile.Remove(filename)
    with self.assertRaisesWithPredicateMatch(
        IOError, lambda e: "does not exist"):
tensorflow.python.platform.gfile.Remove
729
import tensorflow as tf

        # compute norms in case they need to be logged
        self.gradient_norms = [tf.norm(g) + NUMTOL for (g, v) in clipped_grads_and_vars]
        self.weight_norms = [tf.norm(v) + NUMTOL for (g, v) in clipped_grads_and_vars]

        # check that gradients are finite
        grads = [tf.check_numerics(g, "grads is not finite")
                 for (g, v) in clipped_grads_and_vars]
        variables = [tf.check_numerics(v, "grads is not finite")
                     for (g, v) in clipped_grads_and_vars]
        self.gradient_weight_global_norms = [tf.global_norm(grads),
                                             tf.global_norm(variables)]

        # 2nd part of minimize: apply_gradient
        optimizer_step = self._optimizer.apply_gradients(clipped_grads_and_vars,
                                                         global_step=self.global_step)
tensorflow.check_numerics
730
import tensorflow as tf

    logit = tf.nn.xw_plus_b(H_drop, W, b, name="scores")
    predictions = tf.nn.softmax(logit, name="predictions")

    # calculate loss and optimizer
    with tf.variable_scope("FCoptimize", reuse=None):
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=Y)
            + l2_reg_lambda * l2_loss)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)

    # calculate accuracy
    correct_predictions = tf.equal(tf.argmax(predictions, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")

    print("done...")
    print("************")

    path = 'save/'
    ckpt_name = 'save/model.ckpt'
    fname = 'model.tf'
    dst_nodes = ['output/predictions']
    saver = tf.train.Saver()
tensorflow.argmax
731
import tensorflow as tf

    hist_rater_b = tf.reduce_sum(labels, 0)

    conf_mat = tf.matmul(tf.transpose(pred_norm), labels)

    nom = tf.reduce_sum(weights * conf_mat)
    denom = tf.reduce_sum(weights * tf.matmul(
        tf.reshape(hist_rater_a, [num_ratings, 1]),
        tf.reshape(hist_rater_b, [1, num_ratings])) / tf.to_float(batch_size))

    try:
        return -(1 - nom / denom)
    except Exception:
        return -(1 - nom / (denom + eps))
tensorflow.to_float
732
from tensorflow.python.eager import context

      initial_output = tf.slice(initial_output, [0, 0, 0, 0],
                                common_layers.shape_list(initial_output))
    target_modality = self._problem_hparams.target_modality
    if target_modality.is_class_modality:
      decode_length = 1
    else:
      decode_length = common_layers.shape_list(
          features["inputs"])[1] + decode_length

    # Initial values of result, logits and loss.
    result = initial_output
    # tensor of shape [batch_size, time, 1, 1, vocab_size]
    logits = tf.zeros((batch_size, 0, 1, 1,
                       target_modality.top_dimensionality))
    if not context.in_eager_mode():
      logits.set_shape([None, None, None, None, None])
    loss = 0.0

    def while_exit_cond(result, logits, loss):  # pylint: disable=unused-argument
      """Exit the loop either if reach decode_length or EOS."""
      length = common_layers.shape_list(result)[1]

      not_overflow = length < decode_length

      if self._problem_hparams.stop_at_eos:
tensorflow.python.eager.context.in_eager_mode
733
from tensorflow.python.framework import ops

  total = _create_local('total', shape=[])
  count = _create_local('count', shape=[])

  if weights is not None:
    weights = math_ops.to_float(weights)
    values = math_ops.mul(values, weights)
    num_values = math_ops.reduce_sum(_broadcast_weights(weights, values))
  else:
    num_values = math_ops.to_float(array_ops.size(values))

  total_compute_op = state_ops.assign_add(total, math_ops.reduce_sum(values))
  count_compute_op = state_ops.assign_add(count, num_values)

  mean = _safe_div(total, count, 'value')
  with ops.control_dependencies([total_compute_op, count_compute_op]):
    update_op = _safe_div(total, count, 'update_op')

  if metrics_collections:
    ops.add_to_collections(metrics_collections, mean)

  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)

  return mean, update_op


def streaming_mean_tensor(values, weights=None, metrics_collections=None,
                          updates_collections=None, name=None):
tensorflow.python.framework.ops.control_dependencies
734
from tensorflow.contrib import metrics as metrics_lib

    result = {"loss": metrics_lib.streaming_mean(self._loss(
        logits, targets,
        weight_tensor=self._get_weight_tensor(features)))}

    # Adding default metrics
    if metrics is None and self._n_classes > 1:
      metrics = {"accuracy": metrics_lib.streaming_accuracy}

    if self._n_classes == 2:
      predictions = math_ops.sigmoid(logits)
      result["eval_auc"] = metrics_lib.streaming_auc(predictions, targets)

    if metrics:
      predictions = self._logits_to_predictions(logits, proba=False)
      result.update(self._run_metrics(predictions, targets, metrics,
                                      self._get_weight_tensor(features)))

    return result

  def _get_predict_ops(self, features):
tensorflow.contrib.metrics.streaming_auc
735
from tensorflow.python.training import server_lib

    cs = server_lib.ClusterSpec({
        "worker": ["localhost:%s" % port1],
        "ps": ["localhost:%s" % port2]
    })

    worker = server_lib.Server(cs, job_name="worker", start=True)
    ps = server_lib.Server(cs, job_name="ps", start=True)

    return worker, ps

  @contextlib.contextmanager
  def _maybeWithDevice(self, device):
tensorflow.python.training.server_lib.Server
736
import tensorflow as tf

mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# In this example, we limit mnist data
Xtr, Ytr = mnist.train.next_batch(5000)  # 5000 for training (nn candidates)
Xte, Yte = mnist.test.next_batch(200)    # 200 for testing

# tf Graph Input
xtr = tf.placeholder("float", [None, 784])
xte = tf.placeholder("float", [784])

# Nearest Neighbor calculation using L1 Distance
# Calculate L1 Distance
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1)
# Prediction: Get min distance index (Nearest neighbor)
pred = tf.arg_min(distance, 0)

accuracy = 0.

# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()

# Start training
with tf.compat.v1.Session() as sess:
    # Run the initializer
    sess.run(init)

    # Add the fault injection code here to instrument the graph
    # We start injecting the fault right away here unlike earlier
tensorflow.arg_min
737
import tensorflow as tf

        epsilon=hparams.vq_epsilon,
        ema=hparams.ema,
        means=means)
    inputs = None
    batch_size = hparams.batch_size
    targets = tf.random_uniform([batch_size,
                                 hparams.img_len,
                                 hparams.img_len,
                                 hparams.hidden_size],
                                minval=-1., maxval=1.)
    target_space_id = None
    tf.train.create_global_step()
    decoder_output, losses, cache = latent_layers.transformer_autoencoder(
        inputs, targets, target_space_id, hparams)

    self.assertEqual(set(six.iterkeys(losses)),
                     {"extra", "extra_loss", "latent_pred"})

    self.evaluate(tf.global_variables_initializer())
    decoder_output_, extra_loss_, latent_pred_ = self.evaluate(
        [decoder_output, losses["extra_loss"], losses["latent_pred"]])
    self.assertEqual(decoder_output_.shape, (batch_size,
                                             hparams.img_len,
                                             hparams.img_len,
tensorflow.train.create_global_step
738
from tensorflow.python.ops import gen_nn_ops

  """
  with ops.op_scope([value, bias], name, "BiasAdd") as name:
    value = ops.convert_to_tensor(value, name="input")
    bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
    return gen_nn_ops._bias_add(value, bias, data_format=data_format,
                                name=name)


ops.RegisterShape("BiasAdd")(common_shapes.bias_add_shape)
tensorflow.python.ops.gen_nn_ops._bias_add
739
from tensorflow.python.training import server_lib

    # distributed jobs, such as "/job:ps" which are not present.
    config._cluster_spec = server_lib.ClusterSpec({})
tensorflow.python.training.server_lib.ClusterSpec
740
import tensorflow as tf

from google.protobuf import text_format
import tensorflow as tf

import preprocessing
import datasets

NUM_TEST_IMAGES = 50000


def load_graph(model_file):
    graph = tf.Graph()
    graph_def = tf.compat.v1.GraphDef()

    import os
    file_ext = os.path.splitext(model_file)[1]

    with open(model_file, "rb") as f:
        if file_ext == '.pbtxt':
            text_format.Merge(f.read(), graph_def)
        else:
            graph_def.ParseFromString(f.read())
tensorflow.compat.v1.GraphDef
741
from tensorflow.python.ops import math_ops

  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))

  def _log_cdf(self, x):
    return math_ops.log(self._cdf(x))

  def _cdf(self, x):
    x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
                                           self.validate_args else [], x)
    # Note that igammac returns the upper regularized incomplete gamma
    # function Q(a, x), which is what we want for the CDF.
    return math_ops.igammac(self.alpha, self.beta / x)

  @distribution_util.AppendDocstring(
      """This is defined to be

      ```
      entropy = alpha - log(beta) + log(Gamma(alpha)) + (1-alpha)digamma(alpha)
      ```

      where digamma(alpha) is the digamma function.""")
tensorflow.python.ops.math_ops.igammac
742
import tensorflow as tf

    # wrapper to make the optimizer work with TPUs
    if params['use_tpu']:
        gen_optimizer = tf.contrib.tpu.CrossShardOptimizer(gen_optimizer)
        dis_optimizer = tf.contrib.tpu.CrossShardOptimizer(dis_optimizer)

    gan_train_ops = tf.contrib.gan.gan_train_ops(gan_model, gan_loss,
                                                 gen_optimizer, dis_optimizer)
tensorflow.contrib.tpu.CrossShardOptimizer
743
from tensorflow.python.ops import math_ops

  Returns:
    A [D1, ... DN] `Tensor` of false negative counts.
  """
  with ops.name_scope(None, 'false_negatives', (predictions_idx, labels)):
    labels, predictions_idx = _maybe_select_class_id(labels,
                                                     predictions_idx,
                                                     class_id)
    fn = set_ops.set_size(set_ops.set_difference(predictions_idx,
                                                 labels,
                                                 aminusb=False))
    fn = math_ops.to_double(fn)
    if weights is not None:
      weights = math_ops.to_double(weights)
      fn = math_ops.mul(fn, weights)
    return fn


def _streaming_sparse_false_negative_at_k(predictions_idx,
                                          labels,
                                          k,
                                          class_id=None,
                                          weights=None,
                                          name=None):
  """Calculates weighted per step false negatives for recall@k.
tensorflow.python.ops.math_ops.to_double
744
import tensorflow as tf

    # Create connected layers: fc1, fc2
    with tf.contrib.framework.arg_scope([tf.contrib.layers.fully_connected],
                                        normalizer_fn=tf.contrib.layers.batch_norm,
tensorflow.contrib.framework.arg_scope
745
from tensorflow.python.framework import ops

            mean,
            tf.check_numerics(decay * (mean - cur_mean), "NaN in moving mean."))
    with tf.name_scope(name, "AssignMovingAvg", [var, cur_var, decay]):
        with ops.colocate_with(var):
            new_var = tf.assign_sub(
                var,
                tf.check_numerics(decay * (var - cur_var),
                                  "NaN in moving variance."))
    with tf.name_scope(name, "IncrementTime", [step]):
        with ops.colocate_with(step):
            new_step = tf.assign_add(step, 1.)
    res += 0. * new_mean * new_var * new_step

    return res


# batch normalization taking into account the volume transformation
def batch_norm_log_diff(input_,
tensorflow.python.framework.ops.colocate_with
746
from tensorflow.python.ops import math_ops

  @distribution_util.AppendDocstring(
      """Variance for inverse gamma is defined only for `alpha > 2`. If
      `self.allow_nan_stats` is `False`, an exception will be raised rather
      than returning `NaN`.""")
  def _variance(self):
    var = (math_ops.square(self.beta) /
           (math_ops.square(self.alpha - 1.) * (self.alpha - 2.)))
    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return array_ops.where(
          self.alpha > 2., var,
          array_ops.fill(self.batch_shape(), nan, name="nan"))
    else:
tensorflow.python.ops.math_ops.square
747
import tensorflow as tf

    else:
        # was used to train LM
        c = tf.nn.conv1d(x, w, stride=1, padding=pad) + b
tensorflow.nn.conv1d
748
import tensorflow as tf

    logits = tf.reshape(logits, [-1, bil_lstm_win_size, 256 * amp_factor])

    forward_cell = tf.nn.rnn_cell.LSTMCell(128)
    backward_cell = tf.nn.rnn_cell.LSTMCell(128)
    encoder_outputs, _ = tf.nn.bidirectional_dynamic_rnn(
        forward_cell, backward_cell,
        logits,
tensorflow.nn.bidirectional_dynamic_rnn
749
import tensorflow as tf "b", bias_var_shape, initializer=tf.constant_initializer(0.0)) if not one_dim_bias and data_format == 'NHWC': b = tf.reshape(b, bshape) return tf.nn.conv2d( x, w, strides=strides, padding=pad,
tensorflow.nn.conv2d
750
import tensorflow as tf

        logits += self.b_soft_no_learn
        op_id = tf.multinomial(logits, 1)
tensorflow.multinomial
751
from tensorflow.python.ops import gen_math_ops


def tanh(x, name=None):
  """Computes hyperbolic tangent of `x` element-wise.

  Args:
    x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,
      or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32` otherwise
    the return type is `quint8`.
  """
  with ops.op_scope([x], name, "Tanh") as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._tanh(x, name=name)


ops.RegisterShape("Abs")(common_shapes.unchanged_shape)
ops.RegisterShape("Ceil")(common_shapes.unchanged_shape)
ops.RegisterShape("Conj")(common_shapes.unchanged_shape)
ops.RegisterShape("Cos")(common_shapes.unchanged_shape)
ops.RegisterShape("Exp")(common_shapes.unchanged_shape)
ops.RegisterShape("Floor")(common_shapes.unchanged_shape)
ops.RegisterShape("Imag")(common_shapes.unchanged_shape)
ops.RegisterShape("Inv")(common_shapes.unchanged_shape)
ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape)
ops.RegisterShape("IsInf")(common_shapes.unchanged_shape)
ops.RegisterShape("IsNan")(common_shapes.unchanged_shape)
tensorflow.python.ops.gen_math_ops._tanh
752
import tensorflow as tf

    def _forward(self, x1, x2, **kwargs):
        log_sigmas = self.parameterizer(x1)
        z2, fldj = half_gaussianize(x2, log_sigmas)
        return z2, fldj

    def _inverse(self, x1, z2, **kwargs):
        log_sigmas = self.parameterizer(x1)
        x2, ildj = half_gaussianize(z2, log_sigmas, inverse=tf.constant(True))
        return x2, ildj


def exponentiate(x, log_lambdas, inverse=tf.constant(False)):
    if not inverse:
        z = tf.math.exp(log_lambdas) * x
        ldj = tf.math.reduce_sum(log_lambdas, axis=[1, 2, 3])
    else:
        z = x * tf.math.exp(-log_lambdas)
        ldj = -tf.math.reduce_sum(log_lambdas, axis=[1, 2, 3])
    return z, ldj


class Exponentiate(Parameterize):
    """
    Implementation of parameterize for an exponential prior.
    """
    def __init__(self, input_shape=None, name='gaussianize', *args, **kwargs):
tensorflow.math.exp
753
import tensorflow as tf

    filter_class_true = tf.boolean_mask(tf.cast(mask_pos, tf.float32),
                                        loss_class_mask)

    filter_class_pred = tf.boolean_mask(class_pred, loss_class_mask_b)
    filter_class_pred = tf.reshape(filter_class_pred, [-1, num_class])

    loss_class = tf.keras.losses.sparse_categorical_crossentropy(
        y_true=filter_class_true, y_pred=filter_class_pred)
    loss_class = tf.reduce_mean(loss_class)
tensorflow.keras.losses.sparse_categorical_crossentropy
754
from tensorflow.python.ops import check_ops

      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.

  Returns:
    value_tensor: A tensor representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `values`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  check_ops.assert_type(values, dtypes.bool)
  count = _create_local('count', shape=[])

  values = math_ops.to_float(values)
  if weights is not None:
    weights = math_ops.to_float(weights)
    values = math_ops.mul(values, weights)

  value_tensor = array_ops.identity(count)
  update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))

  if metrics_collections:
    ops.add_to_collections(metrics_collections, value_tensor)
tensorflow.python.ops.check_ops.assert_type
755
import tensorflow as tf

    if encoder.binary:
        raise NotImplementedError

    pad = tf.nn.embedding_lookup(embeddings, utils.BOS_ID)
    pad = tf.expand_dims(tf.expand_dims(pad, axis=0), axis=1)
    pad = tf.tile(pad, [batch_size, 1, 1])
tensorflow.nn.embedding_lookup
756
import tensorflow as tf

        beam_width=beam_width, blank_index=blank_index, top_paths=1,
        blank_label=0)
    decoded = tf.sparse.SparseTensor(indices[0], values[0], shape[0])
    decoded = tf.cast(tf.sparse.to_dense(decoded), tf.int32)
    decoded_u = tf.sparse.SparseTensor(indices_u[0], values_u[0], shape_u[0])
    decoded_u = tf.cast(tf.sparse.to_dense(decoded_u), tf.int32)

    # Adjust event vals according to representation
    decoded = tf.where(tf.not_equal(decoded, 0), decoded + shift, decoded)
tensorflow.sparse.SparseTensor
757
from tensorflow.python.ops import math_ops

  predictions.get_shape().assert_is_compatible_with(labels.get_shape())
  predictions, normalizer = tensor_util.remove_squeezable_dimensions(
      predictions, normalizer)
  predictions.get_shape().assert_is_compatible_with(normalizer.get_shape())
  relative_errors = math_ops.select(
      math_ops.equal(normalizer, 0.0),
      array_ops.zeros_like(labels),
      math_ops.div(math_ops.abs(labels - predictions), normalizer))
  return streaming_mean(relative_errors, weights, metrics_collections,
                        updates_collections, name or 'mean_relative_error')


def streaming_mean_squared_error(predictions, labels, weights=None,
                                 metrics_collections=None,
                                 updates_collections=None,
tensorflow.python.ops.math_ops.abs
758
from tensorflow.python.ops import gen_math_ops

    A `Tensor` of the same type as `a`.
  """
  with ops.op_scope([a, b], name, "MatMul") as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    if a.dtype == types.float32 and (a_is_sparse or b_is_sparse):
      return sparse_matmul(a, b,
                           transpose_a=transpose_a,
                           transpose_b=transpose_b,
                           a_is_sparse=a_is_sparse,
                           b_is_sparse=b_is_sparse,
                           name=name)
    else:
      return gen_math_ops._mat_mul(a, b,
                                   transpose_a=transpose_a,
                                   transpose_b=transpose_b,
                                   name=name)


sparse_matmul = gen_math_ops._sparse_mat_mul
batch_matmul = gen_math_ops._batch_mat_mul

ops.RegisterShape("MatMul")(common_shapes.matmul_shape)
ops.RegisterShape("SparseMatMul")(common_shapes.matmul_shape)


def _as_indexed_slices(x):
tensorflow.python.ops.gen_math_ops._mat_mul
759
import tensorflow as tf

        np.random.seed(self.seed)
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
tensorflow.compat.v1.logging.set_verbosity
760
import tensorflow as tf

        channels = tf.unstack(image, axis=-1)
        image = tf.stack([channels[2], channels[1], channels[0]], axis=-1)

        # dims for normalization
        width = tf.to_float(tf.shape(image)[2])
        height = tf.to_float(tf.shape(image)[1])

        # from [x1, y1, x2, y2, cls] to normalized [y1, x1, y1, x1]
        cols = tf.unstack(boxes, axis=1)
        boxes = tf.stack([cols[1] / height,
                          cols[0] / width,
                          cols[3] / height,
                          cols[2] / width], axis=1)

        # add batch dimension (assume batch_size==1)
        # assert image.get_shape()[0] == 1
        boxes = tf.expand_dims(boxes, dim=0)
        image = tf.image.draw_bounding_boxes(image, boxes)  # draw the ground truth boxes on the image

        return tf.summary.image('ground_truth', image)

    def _add_act_summary(self, tensor):
        tf.summary.histogram('ACT/' + tensor.op.name + '/activations', tensor)
        tf.summary.scalar('ACT/' + tensor.op.name + '/zero_fraction',
                          tf.nn.zero_fraction(tensor))

    def _add_score_summary(self, key, tensor):
        tf.summary.histogram('SCORE/' + tensor.op.name + '/' + key + '/scores', tensor)

    def _add_train_summary(self, var):
        tf.summary.histogram('TRAIN/' + var.op.name, var)
tensorflow.image.draw_bounding_boxes
761
import tensorflow as tf

    correct = tf.cast(correct, tf.float32)
    accuracy = tf.reduce_mean(correct) * 100.0
    tf.summary.scalar(scope + 'accuracy', accuracy)
    return accuracy


def num_correct_prediction(logits, labels):
    '''
    Evaluate the quality of the logits at predicting the label
    '''
    correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))
    correct = tf.cast(correct, tf.int32)
    n_correct = tf.reduce_sum(correct)
    return n_correct


def optimize(loss, learning_rate, global_step):
    '''
    Optimization, use Gradient Descent as default
    '''
tensorflow.arg_max
762
import tensorflow as tf

    features = sp.identity(features.shape[0])  # featureless

# Some preprocessing
adj_norm = preprocess_graph(adj)

# Define placeholders
placeholders = {
    'features': tf.sparse_placeholder(tf.float32),
    'adj': tf.sparse_placeholder(tf.float32),
    'adj_orig': tf.sparse_placeholder(tf.float32),
    'dropout': tf.placeholder_with_default(0., shape=())
}

num_nodes = adj.shape[0]

features = sparse_to_tuple(features.tocoo())
tensorflow.sparse_placeholder
763
from tensorflow.python.framework import ops

  def _define_vars(self, params):
    pass

  def inference_graph(self, data):
    with ops.device(self.device_assigner):
      # Compute activations for the neural network.
      nn_activations = [layers.fully_connected(data, self.params.layer_size)]

      for _ in range(1, self.params.num_layers):
tensorflow.python.framework.ops.device
764
import tensorflow as tf

        vz = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string,
                                                value_dtype=tf.int64,
                                                default_value=-1)

        vx_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1))
        vz_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1))

        x_t = tf.gather(x, l)
        x_t_len = tf.strings.length(x_t)
        x_t = tf.string_split([x_t], delimiter='').values

        z_t = tf.gather(y, m)
        z_t_len = tf.strings.length(z_t)
        z_t = tf.string_split([z_t], delimiter='').values

        for i in tf.range(start=0, limit=x_t_len - self._p + 1, delta=1,
                          dtype=None, name='range'):
            u = tf.string_join(x_t[i:i + self._p], '')
            vx_keys, r = tf.cond(
                tf.greater(vx.lookup(u), -1),
tensorflow.string_split
765
import tensorflow as tf

        next_indices = tf.stack([neighbor.indices[:, 0], next_idx], 1)
        next_values = weight.values
        next_shape = tf.stack([tf.size(nodes), tf.size(next_nodes)])
        next_shape = tf.cast(next_shape, tf.int64)
        next_adj = tf.SparseTensor(next_indices, next_values, next_shape)
        next_adj = tf.sparse_reorder(next_adj)
        nodes_list.append(next_nodes)
        adj_list.append(next_adj)
        nodes = next_nodes
    return nodes_list, adj_list
tensorflow.sparse_reorder
766
from tensorflow.python.ops import sparse_ops

        hashed_output=True,
        num_buckets=1024,
        hash_key=layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)
    cross_dense = sparse_ops.sparse_tensor_to_dense(cross)
    with session.Session():
      values = cross_dense.eval()
tensorflow.python.ops.sparse_ops.sparse_tensor_to_dense
767
import tensorflow as tf

            batch_size=batch_size,
            num_parallel_batches=num_cpu_threads,
            drop_remainder=True))
    return d

  return input_fn


def _decode_record(record, name_to_features):
  """Decodes a record to a TensorFlow example."""
  example = tf.parse_single_example(record, name_to_features)

  # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
  # So cast all int64 to int32.
  for name in list(example.keys()):
    t = example[name]
    if t.dtype == tf.int64:
      t = tf.to_int32(t)
    example[name] = t
tensorflow.parse_single_example
768
from tensorflow.python.feature_column import feature_column_lib as core_feature_column

    model = estimator.GradientBoostedDecisionTreeEstimator(
        head=head_fn,
        learner_config=learner_config,
        num_trees=1,
        examples_per_layer=3,
        model_dir=model_dir,
        config=config,
        feature_columns=[core_feature_column.numeric_column("x")],
        use_core_libs=True)

    model.fit(input_fn=_train_input_fn, steps=15)
    model.evaluate(input_fn=_eval_input_fn, steps=1)
    model.export(self._export_dir_base)
tensorflow.python.feature_column.feature_column_lib.numeric_column
769
import tensorflow.contrib.graph_editor as ge

        for ts in [ts_filtered, ts_all]:

            # get all bottlenecks in the graph
            bottleneck_ts = []
            for t in ts:
                b = set(ge.get_backward_walk_ops(t.op, inclusive=True, within_ops=fwd_ops))
                f = set(ge.get_forward_walk_ops(t.op, inclusive=False, within_ops=fwd_ops))
                # check that there are no shortcuts
                b_inp = set([inp for op in b for inp in op.inputs]).intersection(ts_all)
                f_inp = set([inp for op in f for inp in op.inputs]).intersection(ts_all)
tensorflow.contrib.graph_editor.get_backward_walk_ops
770
from tensorflow.contrib.eager.python.examples.revnet import revnet

    with tfe.execution_mode(execution_mode):
      device, data_format = device_and_format
      model = revnet.RevNet(config=config)
      if defun:
tensorflow.contrib.eager.python.examples.revnet.revnet.RevNet
771
import tensorflow as tf

            eval_metric_ops=eval_metric_ops
        )

    assert mode == tf.estimator.ModeKeys.TRAIN

    with tf.variable_scope('learning_rate'):
        global_step = tf.train.get_global_step()
        learning_rate = tf.train.cosine_decay(
            params['initial_learning_rate'],
            global_step,
            decay_steps=params['num_steps']
        )
        tf.summary.scalar('learning_rate', learning_rate)
tensorflow.train.cosine_decay
772
import tensorflow.contrib.graph_editor as ge

    if hasattr(ops, '__iter__') and not isinstance(ops, str):
        l = [(op.name if hasattr(op, "name") else str(op)) for op in ops]
        if sort_outputs:
            return sorted(l)
        return l
    else:
        return ops.name if hasattr(ops, "name") else str(ops)


def my_add_control_inputs(wait_to_do_ops, inputs_to_do_before):
    for op in wait_to_do_ops:
        ci = [i for i in inputs_to_do_before
              if op.control_inputs is None or i not in op.control_inputs]
        ge.add_control_inputs(op, ci)
tensorflow.contrib.graph_editor.add_control_inputs
773
import tensorflow as tf

  train = dataset['train'].map(
      load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
  test = dataset['test'].map(load_image_test)

  train_dataset = train.cache().shuffle(1000).batch(batch_size).repeat()
  train_dataset = train_dataset.prefetch(
      buffer_size=tf.data.experimental.AUTOTUNE)
  test_dataset = test.batch(batch_size)

  config = hparams_config.get_efficientdet_config('efficientdet-d0')
  config.heads = ['segmentation']
  model = efficientdet_keras.EfficientDetNet(config=config)
  model.build((1, 512, 512, 3))
  model.compile(
      optimizer='adam',
      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
      metrics=['accuracy'])

  val_subsplits = 5
  val_steps = info.splits['test'].num_examples // batch_size // val_subsplits
  model.fit(
      train_dataset,
      epochs=20,
      steps_per_epoch=steps_per_epoch,
      validation_steps=val_steps,
      validation_data=test_dataset,
      callbacks=[])
  model.save_weights('./test/segmentation')
tensorflow.keras.losses.SparseCategoricalCrossentropy
774
import tensorflow as tf

    # load pretrained model
    vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    assign_ops = []
    for var in vars_list:
        vname = var.name
        from_name = vname
        var_value = tf.contrib.framework.load_variable(MODEL_DIR, from_name)
        assign_ops.append(tf.assign(var, var_value))
    sess.run(assign_ops)
    print('Model loaded.')

    result = sess.run(output)
    cv2.imwrite("output.png", result[0])
    tf.reset_default_graph()

    # return FileResponse("output.png", media_type="image/png")
    with open("output.png", "rb") as image_file:
        image_string = "data:image/png;base64,{}".format(
            base64.b64encode(image_file.read()).decode())

    return {"image": image_string}


if __name__ == '__main__':
    uvicorn.run(app, host="0.0.0.0", port=8080)
tensorflow.reset_default_graph
775
from tensorflow.contrib.tpu.python.tpu.datasets import StreamingFilesDataset

      if epochs_between_evals > 1:
        raise ValueError("epochs_between_evals > 1 not supported for file "
                         "based dataset.")
      epoch_data_dir = self._result_queue.get(timeout=300)
      if not self._is_training:
        self._result_queue.put(epoch_data_dir)  # Eval data is reused.

      file_pattern = os.path.join(
          epoch_data_dir, rconst.SHARD_TEMPLATE.format("*"))
      dataset = StreamingFilesDataset(
          files=file_pattern, worker_job="worker",
          num_parallel_reads=rconst.NUM_FILE_SHARDS, num_epochs=1,
          sloppy=not self._deterministic)
      map_fn = functools.partial(self._deserialize, batch_size=batch_size)
      dataset = dataset.map(map_fn, num_parallel_calls=16)

    else:
      types = {movielens.USER_COLUMN: rconst.USER_DTYPE,
tensorflow.contrib.tpu.python.tpu.datasets.StreamingFilesDataset
776
import tensorflow as tf

        # sum over hidden units
        num = tf.reduce_sum(tf.square(num), axis=2)
        denom = tf.reduce_sum(tf.square(denom), axis=2)

        bounded = tf.where(tf.greater(denom, 1e-20),
                           tf.div(num, 1.0 * denom),
                           tf.ones_like(num))
        nelems = tf.reduce_mean(tf.where(tf.greater(denom, 1e-20),
                                         1.0 * tf.ones_like(num),
                                         1.0 * tf.zeros_like(num)), axis=1)

        # sum mean over each batch by time steps
        Omega = tf.square(bounded - 1.0)
        Omega = tf.reduce_sum(tf.reduce_mean(Omega, axis=1)) / (1.0 * tf.reduce_sum(nelems))

        out = tf.gradients(Omega, self.W_rec)
        out[0] = tf.Print(out[0], [out[0], self.W_rec, Omega], "omega grads")
        out[0] = tf.verify_tensor_all_finite(out[0], "dead omega grad")

        return out, test

    def sussillo_reg(self):
        states = self.states

        reg = 0

        for state in states:
            dJr = tf.matmul(tf.nn.relu(state),
                            tf.matmul(tf.abs(self.W_rec) * self.rec_Connectivity,
                                      self.Dale_rec))
            reg += tf.reduce_sum(tf.square(dJr))
tensorflow.verify_tensor_all_finite
777
from tensorflow.python.ops import nn

    logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)

    if proba:
      return nn.softmax(logits)
    else:
      return math_ops.argmax(logits, 1)
tensorflow.python.ops.nn.softmax
778
import tensorflow as tf

    # -1 otherwise. For instance, candidate[3] = id(token[3] + token[4]).

    # First, split into basic UTF-8 characters (letters).
    chars = tf.strings.unicode_split(word, 'UTF-8')
    tokens = self._StringToToken(chars)
tensorflow.strings.unicode_split
779
import tensorflow as tf

      x: The input tensor.
      prediction: The prediction class tensor.
      output_class: The output tensor.
      sess: The graph session.
    """
    # input label placeholder
    y = tf.placeholder("float", [None, self.n_classes])

    # Loss function
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))

    # Optimization
    opt = tf.train.AdamOptimizer(
        learning_rate=self.learning_rate).minimize(loss)

    # Initialize variables
    init = tf.global_variables_initializer()
    sess.run(init)

    for _ in range(TRAIN_STEPS):
      batch_x, batch_y = self.mnist.train.next_batch(
          batch_size=self.batch_size, shuffle=False)
      batch_x = batch_x.reshape((self.batch_size, self.time_steps,
                                 self.n_input))
      sess.run(opt, feed_dict={x: batch_x, y: batch_y})

  def saveAndRestoreModel(self, lstm_layer, sess, saver, is_dynamic_rnn):
    """Saves and restores the model to mimic the most common use case.

    Args:
      lstm_layer: The lstm layer either a single lstm cell or a multi lstm cell.
tensorflow.global_variables_initializer
780
import tensorflow as tf

        logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
        logits = tf.nn.bias_add(logits, output_bias)
        log_probs = tf.nn.log_softmax(logits, axis=-1)

        labels = tf.reshape(labels, [-1])

        one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
        per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
        loss = tf.reduce_mean(per_example_loss)
        return (loss, per_example_loss, log_probs)
tensorflow.one_hot
781
from tensorflow.python.framework import sparse_tensor

        linear_feature_columns=(bucketized_feature,),
        dnn_feature_columns=(cont_feature,),
        dnn_hidden_units=(3, 3))

    input_fn = test_data.iris_input_multiclass_fn
    metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate(
        input_fn=input_fn, steps=100)
    self._assertCommonMetrics(metrics)

  def benchmarkPartitionedVariables(self):
    def _input_fn():
      features = {
          'language':
              sparse_tensor.SparseTensor(
                  values=('en', 'fr', 'zh'),
                  indices=((0, 0), (0, 1), (2, 0)),
                  dense_shape=(3, 2))
      }
      labels = constant_op.constant(((1,), (0,), (0,)))
      return features, labels

    # The given hash_bucket_size results in variables larger than the
    # default min_slice_size attribute, so the variables are partitioned.
    sparse_feature = feature_column.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=2e7)
    embedding_feature = feature_column.embedding_column(
        sparse_feature, dimension=1)
tensorflow.python.framework.sparse_tensor.SparseTensor
782
import tensorflow as tf

  # Write images to disk.
  image_write_ops = None
  if FLAGS.write_to_disk:
    image_write_ops = tf.write_file(
        '%s/%s' % (FLAGS.eval_dir, 'conditional_gan.png'),
        tf.image.encode_png(data_provider.float_image_to_uint8(
            reshaped_img[0])))

  # For unit testing, use `run_eval_loop=False`.
  if not run_eval_loop:
    return
  tf.contrib.training.evaluate_repeatedly(
      FLAGS.checkpoint_dir,
      hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
             tf.contrib.training.StopAfterNEvalsHook(1)],
      eval_ops=image_write_ops,
      max_number_of_evaluations=FLAGS.max_number_of_evaluations)


def _get_generator_inputs(num_images_per_class, num_classes, noise_dims):
  # Since we want a grid of numbers for the conditional generator, manually
  # construct the desired class labels.
  num_images_generated = num_images_per_class * num_classes
  noise = tf.random_normal([num_images_generated, noise_dims])
  labels = [lbl for lbl in range(num_classes)
            for _ in range(num_images_per_class)]
tensorflow.contrib.training.SummaryAtEndHook
783
import tensorflow as tf

      self.w_attn_2 = tf.get_variable("w_2", [self.lstm_size, self.lstm_size])
      self.v_attn = tf.get_variable("v", [self.lstm_size, 1])

  def _build_sampler(self, prev_c=None, prev_h=None, use_bias=False):
    """Build the sampler ops and the log_prob ops."""

    print("-" * 80)
    print("Build controller sampler")

    anchors = tf.TensorArray(
        tf.float32, size=self.num_cells + 2, clear_after_read=False)
    anchors_w_1 = tf.TensorArray(
        tf.float32, size=self.num_cells + 2, clear_after_read=False)
    arc_seq = tf.TensorArray(tf.int32, size=self.num_cells * 4)
    if prev_c is None:
      assert prev_h is None, "prev_c and prev_h must both be None"
      prev_c = [tf.zeros([1, self.lstm_size], tf.float32)
                for _ in range(self.lstm_num_layers)]
tensorflow.TensorArray
784
import tensorflow as tf

  If `x` is integral, the output is cast to [u]int64.
  If `x` is sparse and `reduce_inst_dims` is False, returns 0 where the
  column has no values across batches.

  Raises:
    TypeError: If the type of `x` is not supported.
  """
  with tf.compat.v1.name_scope(name, 'sum'):
    if reduce_instance_dims:
      x = tf.reduce_sum(input_tensor=tf_utils.get_values(x))
    elif isinstance(x, tf.SparseTensor):
      if x.dtype == tf.uint8 or x.dtype == tf.uint16:
        x = tf.cast(x, tf.int64)
      elif x.dtype == tf.uint32 or x.dtype == tf.uint64:
        raise TypeError('Data type %r is not supported' % x.dtype)
      x = tf.sparse.reduce_sum(x, axis=0)
    elif isinstance(x, tf.RaggedTensor):
      raise NotImplementedError(
          'Elementwise sum does not support RaggedTensors.')
    else:
      x = tf.reduce_sum(input_tensor=x, axis=0)
    output_dtype, sum_fn = _sum_combine_fn_and_dtype(x.dtype)
    return _numeric_combine(
        inputs=[x],
        fn=sum_fn,
        default_accumulator_value=0,
        reduce_instance_dims=reduce_instance_dims,
        output_dtypes=[output_dtype])[0]
tensorflow.sparse.reduce_sum
785
import tensorflow as tf


def conv1d_banks(inputs, K=16, is_training=True, scope="conv1d_banks"):
    with tf.variable_scope(scope):
        outputs = tf.layers.conv1d(inputs, embed_size // 2, 1, padding="SAME")
        for k in range(2, K + 1):
            with tf.variable_scope("num_{}".format(k)):
                output = tf.layers.conv1d(inputs, embed_size // 2, k, padding="SAME")
                outputs = tf.concat((outputs, output), -1)
        outputs = tf.nn.relu(tf.layers.batch_normalization(outputs, training=is_training))
    return outputs
tensorflow.layers.conv1d
786
import tensorflow as tf

      mode,
      data_parallelism=data_parallelism,
      decode_hparams=decode_hparams)

  global_step = tf.train.get_global_step()

  mesh_shape = mtf.convert_to_shape(hparams.mesh_shape)
  layout_rules = mtf.convert_to_layout_rules(hparams.layout)
tensorflow.train.get_global_step
787
import tensorflow as tf

    if input_data_type != data_type:
      images = tf.cast(images, data_type)
    network = ConvNetBuilder(
        images, input_nchan, phase_train, self.data_format, data_type)
    self.model_conf.add_inference(network)
    # Add the final fully-connected class layer
    logits = network.affine(nclass, activation='linear')
    if not phase_train:
      top_1_op = tf.reduce_sum(
          tf.cast(tf.nn.in_top_k(logits, labels, 1), data_type))
      top_5_op = tf.reduce_sum(
          tf.cast(tf.nn.in_top_k(logits, labels, 5), data_type))
      return (logits, top_1_op, top_5_op)
    loss = loss_function(logits, labels)
    params = self.variable_mgr.trainable_variables_on_device(device_num)
    l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in params])
    weight_decay = FLAGS.weight_decay
    if weight_decay is not None and weight_decay != 0.:
      loss += weight_decay * l2_loss

    aggmeth = tf.AggregationMethod.DEFAULT
    grads = tf.gradients(loss, params, aggregation_method=aggmeth)
tensorflow.nn.in_top_k
788
import tensorflow as tf

    For the scale (gamma), a ones initializer is used by default.

    Args:
      var_name: name of variable as a string.
    """
    if var_name not in self._initializers:
      if var_name == self.GAMMA:
        self._initializers[self.GAMMA] = tf.ones_initializer()
      elif var_name == self.BETA:
        self._initializers[self.BETA] = tf.zeros_initializer()

  def _build_statistics_variance(self, input_batch,
                                 reduction_indices, use_batch_stats):
    """Builds the statistics part of the graph when using moving variance.
tensorflow.ones_initializer
789
import tensorflow as tf

layers = tf.keras.layers


def parse(line):
  """Parse a line from the colors dataset."""

  # Each line of the dataset is comma-separated and formatted as
  #    color_name, r, g, b
  # so `items` is a list [color_name, r, g, b].
  items = tf.string_split([line], ",").values
  rgb = tf.string_to_number(items[1:], out_type=tf.float32) / 255.
  # Represent the color name as a one-hot encoded character sequence.
  color_name = items[0]
  chars = tf.one_hot(tf.decode_raw(color_name, tf.uint8), depth=256)
  # The sequence length is needed by our RNN.
  length = tf.cast(tf.shape(chars)[0], dtype=tf.int64)
  return rgb, chars, length


def maybe_download(filename, work_directory, source_url):
  """Download the data from source url, unless it's already here.
tensorflow.string_to_number
790
import tensorflow as tf

  Returns:
    A tensor with the log loss.
  """
  with tf.name_scope(name):
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = tf.to_float(predictions)
    labels = tf.to_float(labels)
    losses = -tf.multiply(labels, tf.log(predictions + eps)) - tf.multiply(
        (1 - labels), tf.log(1 - predictions + eps))
    return tf.losses.compute_weighted_loss(losses, weights)


def kappa_loss(predictions, labels, y_pow=1, eps=1e-15,
               num_ratings=5, batch_size=32, name='kappa'):
  """Define a kappa loss. It's a continuous differentiable approximation of
  the discrete kappa loss.

  Args:
    predictions: 2D tensor or array, [batch_size, num_classes] predictions of
      the network.
tensorflow.losses.compute_weighted_loss
791
import tensorflow as tf

        return 196.0 * 21.0 / 4096.0
    else:
        rec = tf.cast(kw * kh, tf.float32)
        n_max = 7 + tf.math.ceil(tf.math.log(rec) / tf.math.log(2.))
        ns = tf.range(0., n_max)
        ns_pow = tf.pow(2., ns)
        ks = tf.round(ns_pow / rec)
        diffs = tf.math.abs(ks / ns_pow - 1 / rec)
        n = tf.argmin(diffs)
        k = ks[n]
        scale = k / tf.pow(2., tf.cast(n, tf.float32))
        scale *= rec
        return scale
tensorflow.math.abs
792
import tensorflow as tf

        # List of dicts to dict of lists
        metrics = dict(zip(metrics[0], zip(*[m.values() for m in metrics])))
        metrics = {m: np.nanmean(metrics[m], axis=0) for m in metrics}
        return metrics

    def _checkpoint_var_search(self, checkpoint_path):
        reader = tf.train.NewCheckpointReader(checkpoint_path)
        saved_shapes = reader.get_variable_to_shape_map()
        model_names = tf.model_variables()  # Used by tf.slim layers
        if not len(tf.model_variables()):
            model_names = tf.global_variables()  # Fallback when slim is not used
        model_names = set([v.name.split(':')[0] for v in model_names])
        checkpoint_names = set(saved_shapes.keys())
        found_names = model_names & checkpoint_names
        missing_names = model_names - checkpoint_names
        shape_conflicts = set()
tensorflow.model_variables
793
from tensorflow.python.ops import array_ops

    def reallocate():
      next_size = _next_array_size(new_size)
      next_shape = array_ops.pack([next_size] + fixed_shape)
      new_value = array_ops.zeros(next_shape, dtype=values.dtype)
      old_value = array.value()
tensorflow.python.ops.array_ops.pack
794
from tensorflow.python.ops import math_ops

      weights=weights)

  metric = math_ops.div(tp, math_ops.add(tp, fp), name=name)
  update = math_ops.div(
tensorflow.python.ops.math_ops.add
795
import tensorflow as tf

    original_tensor_shape = tf.shape(tensor)
    in_dim = int(tensor.get_shape()[-1])

    rank = _rank(tensor)
    if rank > 2:
        # -- time distributed dense
        tensor = tf.reshape(tensor, shape=(-1, in_dim))

    name = opts.get("name", "")

    if weight is None:
        initializer = tf.contrib.layers.xavier_initializer(uniform=True)
        weight = tf.get_variable("{}_dense_W".format(name),
                                 initializer=initializer(shape=(in_dim, hidden_dims)))
    if bias is None:
        bias = tf.get_variable("{}_dense_b".format(name),
                               initializer=tf.zeros(shape=hidden_dims))

    out = tf.add(tf.matmul(tensor, weight), bias)

    if rank > 2:
        # reshape back to time dimension
        out = tf.reshape(out, shape=original_tensor_shape)
tensorflow.contrib.layers.xavier_initializer
796
from tensorflow.python.ops import array_ops

    logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)
tensorflow.python.ops.array_ops.zeros_like
797
import tensorflow as tf

          input_tensor,
          units=bert_config.hidden_size,
          activation=modeling.get_activation(bert_config.hidden_act),
          kernel_initializer=modeling.create_initializer(
              bert_config.initializer_range))
      input_tensor = modeling.layer_norm(input_tensor)

    # The output weights are the same as the input embeddings, but there is
    # an output-only bias for each token.
    output_bias = tf.get_variable(
        "output_bias",
        shape=[bert_config.vocab_size],
        initializer=tf.zeros_initializer())
    logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    label_ids = tf.reshape(label_ids, [-1])
    label_weights = tf.reshape(label_weights, [-1])

    one_hot_labels = tf.one_hot(
        label_ids, depth=bert_config.vocab_size, dtype=tf.float32)

    # The `positions` tensor might be zero-padded (if the sequence is too
    # short to have the maximum number of predictions). The `label_weights`
    # tensor has a value of 1.0 for every real prediction and 0.0 for the
    # padding predictions.
    per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
    numerator = tf.reduce_sum(label_weights * per_example_loss)
    denominator = tf.reduce_sum(label_weights) + 1e-5
tensorflow.nn.log_softmax
798
from tensorflow.python.platform import gfile

      # Adding s2 again (old s2 is removed first, then new s2 appended)
      s2 = save2.save(sess, os.path.join(save_dir, "s2"))
      self.assertEqual([s3, s2], save2.last_checkpoints)
      # Created by the first helper.
      self.assertTrue(gfile.Exists(s1))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
      # Deleted by the first helper.
      self.assertFalse(gfile.Exists(s3))
      self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
      self.assertTrue(gfile.Exists(s2))
      self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))

      # Adding s1 (s3 should now be deleted as oldest in list)
      s1 = save2.save(sess, os.path.join(save_dir, "s1"))
tensorflow.python.platform.gfile.Exists
799