Columns:
seed: string, lengths 59 to 2.16k
seed_api: string, lengths 14 to 101
index: int64, values 0 to 523
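Each record below pairs a source snippet (seed) with the fully qualified TensorFlow API it exercises (seed_api) and a running row id (index). As a minimal, hypothetical sketch of how records with this schema could be consumed once loaded as Python dictionaries (the loading step, function name, and example row here are illustrative assumptions, not part of the dataset):

from typing import Dict, Iterable, List

def group_seeds_by_api(rows: Iterable[Dict]) -> Dict[str, List[str]]:
    """Bucket the 'seed' snippets by the 'seed_api' column they exercise."""
    buckets: Dict[str, List[str]] = {}
    for row in rows:
        # Every row carries the three columns listed above: seed, seed_api, index.
        buckets.setdefault(row["seed_api"], []).append(row["seed"])
    return buckets

# Hypothetical usage with one row shaped like the records below.
example_rows = [{"index": 100, "seed_api": "tensorflow.tanh", "seed": "import tensorflow as tf ..."}]
print(group_seeds_by_api(example_rows).keys())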
import tensorflow as tf if self.temperature is not None: logits /= self.temperature if self.tanh_constant is not None: logits = self.tanh_constant * tf.tanh(logits) index = tf.multinomial(logits, 1) index = tf.to_int32(index)
tensorflow.tanh
100
import tensorflow as tf for var in vars_list: vname = var.name from_name = vname var_value = tf.contrib.framework.load_variable(MODEL_DIR, from_name) assign_ops.append(tf.assign(var, var_value)) sess.run(assign_ops)
tensorflow.contrib.framework.load_variable
101
import tensorflow as tf return default_params def bilstm_layer(self, embeddings, nwords): t = tf.transpose(embeddings, perm=[1, 0, 2]) lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(self.params['lstm_size']) lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(self.params['lstm_size']) lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw) output_fw, _ = lstm_cell_fw(t, dtype=tf.float32, sequence_length=nwords) output_bw, _ = lstm_cell_bw(t, dtype=tf.float32, sequence_length=nwords) output = tf.concat([output_fw, output_bw], axis=-1)
tensorflow.contrib.rnn.TimeReversedFusedRNN
102
import tensorflow as tf p = tf.Variable(tf.zeros([1024, 1024])) adds = [tf.assign_add(p, ones_t, use_locking=False)
tensorflow.assign_add
103
from tensorflow.contrib.framework.python.framework import checkpoint_utils """ results = self.evaluate(input_fn=input_fn, batch_size=batch_size, steps=steps) return np.sum(results[GMM.SCORES]) def weights(self): """Returns the cluster weights.""" return checkpoint_utils.load_variable( self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_WEIGHT) def clusters(self): """Returns cluster centers.""" clusters = checkpoint_utils.load_variable( self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
tensorflow.contrib.framework.python.framework.checkpoint_utils.load_variable
104
import tensorflow as tf
      return tf.nn.softmax(logits)

    preds = GetWordPred(wvsum)
    z = tf.tile(tf.reshape(tf.reduce_sum(preds,1),[-1,1]), [1, out_vocab_size])
    self.preds, self.z = preds, z
    self.probs = tf.div(preds, z)  #normalize
    self.unweighted_xent = _SafeXEnt(self.y, self.probs)
    self._xent = _SafeXEnt(self.y, self.probs, class_weights=weights)
tensorflow.div
105
from tensorflow.python.ops import array_ops
  # Accumulate the prediction to current confusion matrix.
  current_cm = confusion_matrix_ops.confusion_matrix(
      predictions, labels, num_classes, weights=weights, dtype=cm_dtype)
  update_op = state_ops.assign_add(total_cm, current_cm)

  def compute_mean_iou(name):
    """Compute the mean intersection-over-union via the confusion matrix."""
    sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))
    sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))
    cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))
    denominator = sum_over_row + sum_over_col - cm_diag

    # If the value of the denominator is 0, set it to 1 to avoid
    # zero division.
    denominator = math_ops.select(
        math_ops.greater(denominator, 0), denominator,
        array_ops.ones_like(denominator))
    iou = math_ops.div(cm_diag, denominator)
tensorflow.python.ops.array_ops.diag_part
106
from tensorflow.python.ops.control_flow_ops import with_dependencies self._params) incr_step = state_ops.assign_add(training_util.get_global_step(), 1) loss = math_ops.reduce_sum(losses) training_op = with_dependencies([training_op, incr_step], loss) training_hooks = [_InitializeClustersHook( init_op, is_initialized, config.is_chief)]
tensorflow.python.ops.control_flow_ops.with_dependencies
107
from tensorflow.python.ops import nn def __init__(self, alpha, beta, validate_args=False, allow_nan_stats=True, name="InverseGammaWithSoftplusAlphaBeta"): parameters = locals() parameters.pop("self") with ops.name_scope(name, values=[alpha, beta]) as ns: super(InverseGammaWithSoftplusAlphaBeta, self).__init__( alpha=nn.softplus(alpha, name="softplus_alpha"), beta=nn.softplus(beta, name="softplus_gamma"), validate_args=validate_args, allow_nan_stats=allow_nan_stats, name=ns) self._parameters = parameters
tensorflow.python.ops.nn.softplus
108
import tensorflow as tf shape_b: a list containing shape of the second tensor. Returns: Either a tf.no_op() when shapes are all static and a tf.assert_equal() op when the shapes are dynamic. Raises: ValueError: When shapes are both static and unequal. """ if isinstance(shape_a[0], int) and isinstance(shape_b[0], int): if shape_a[0] != shape_b[0]: raise ValueError('Unequal first dimension {}, {}'.format( shape_a[0], shape_b[0])) else: return tf.no_op() else: return tf.assert_equal(shape_a[0], shape_b[0])
tensorflow.assert_equal
109
import tensorflow as tf sh = x.get_shape().as_list() x = tf.reshape(x, [tf.reduce_prod(s[:-1]), sh[-1]])
tensorflow.reduce_prod
110
import tensorflow as tf domain_predictions = tf.sigmoid(logits) domain_loss = tf.losses.log_loss(domain_selection_mask, domain_predictions, weights=weight) domain_accuracy = util.accuracy_tf(domain_selection_mask, tf.round(domain_predictions)) assert_op = tf.Assert(tf.is_finite(domain_loss), [domain_loss]) with tf.control_dependencies([assert_op]): tag_loss = 'losses/domain_loss'
tensorflow.round
111
import tensorflow as tf elif kh == 6 and kw == 6: return 36.0 * 7.0 / 256.0 elif kh == 7 and kw == 7: return 49.0 * 21.0 / 1024.0 elif kh == 14 and kw == 14: return 196.0 * 21.0 / 4096.0 else: rec = tf.cast(kw * kh, tf.float32) n_max = 7 + tf.math.ceil(tf.math.log(rec) / tf.math.log(2.)) ns = tf.range(0., n_max) ns_pow = tf.pow(2., ns) ks = tf.round(ns_pow / rec) diffs = tf.math.abs(ks / ns_pow - 1 / rec) n = tf.argmin(diffs) k = ks[n] scale = k / tf.pow(2., tf.cast(n, tf.float32)) scale *= rec return scale @register_keras_serializable( package='Vitis', name='VitisGlobalAveragePooling2D') class VitisGlobalAveragePooling2D(tf.keras.layers.GlobalAveragePooling2D): """Vitis version of GlobalAveragePooling2D layer. This is an Vitis version of average pooling to simulate DPU behaviour which to
tensorflow.argmin
112
from tensorflow.python.framework import ops else: return [] def get_extra_inputs(): """Returns the captured input tensors by the function. Returns: If the default graph is being used to define a function, the returned list of tensors are those accessed inside the function body but defined outside the function body so far. Otherwise, returns an empty list. """ g = ops.get_default_graph() if isinstance(g, _FuncGraph): return g.extra_inputs else: return [] def get_extra_args(): """Returns the corresponding function arguments for the captured inputs. Returns: If the default graph is being used to define a function, the returned list of place holders are those used inside the function
tensorflow.python.framework.ops.get_default_graph
113
from tensorflow.python.ops import sparse_ops
    expanded_shape = array_ops.concat(
        0, (array_ops.slice(tensor.shape, [0], expand_dims), [1],
            array_ops.slice(tensor.shape, expand_dims, [-1])),
        name='expanded_shape')
    expanded = sparse_ops.sparse_reshape(
        tensor, shape=expanded_shape, name='expand')
    if multiple == 1:
      return expanded
    return sparse_ops.sparse_concat(
        dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)

  # Dense.
  expanded = array_ops.expand_dims(
      tensor, dim if (dim >= 0) else (dim - 1), name='expand')
  if multiple == 1:
    return expanded
tensorflow.python.ops.sparse_ops.sparse_concat
114
import tensorflow as tf st_serialized = tf.serialize_many_sparse(st)
tensorflow.serialize_many_sparse
115
import tensorflow as tf logits /= self.temperature if self.tanh_constant is not None: logits = self.tanh_constant * tf.tanh(logits) index = tf.multinomial(logits, 1) index = tf.to_int32(index) index = tf.reshape(index, [1])
tensorflow.multinomial
116
from tensorflow.python.summary import summary if summ not in OPTIMIZER_SUMMARIES: raise ValueError("Summaries should be one of [%s], you provided %s." % (", ".join(OPTIMIZER_SUMMARIES), summ)) if learning_rate is not None and learning_rate_decay_fn is not None: if global_step is None: raise ValueError("global_step is required for learning_rate_decay_fn.") lr = learning_rate_decay_fn(lr, global_step) if "learning_rate" in summaries: summary.scalar("learning_rate", lr) # Create optimizer, given specified parameters. if isinstance(optimizer, six.string_types): if lr is None: raise ValueError("Learning rate is None, but should be specified if " "optimizer is string (%s)." % optimizer) if optimizer not in OPTIMIZER_CLS_NAMES:
tensorflow.python.summary.summary.scalar
117
import tensorflow as tf
import tensorflow as tf
from tensorflow.python.framework import ops
ops.reset_default_graph()

# Create graph
sess = tf.Session()

# Create tensors
# Create data to feed in
x_vals = np.array([1., 3., 5., 7., 9.])
x_data = tf.placeholder(tf.float32)
m = tf.constant(3.)

# Multiplication
prod = tf.mul(x_data, m)
for x_val in x_vals:
    print(sess.run(prod, feed_dict={x_data: x_val}))

merged = tf.merge_all_summaries()
if not os.path.exists('tensorboard_logs/'):
    os.makedirs('tensorboard_logs/')
tensorflow.placeholder
118
import tensorflow as tf with tf.variable_scope("evaluation"): accuracy_1 = tf.reduce_mean(tf.cast(tf.equal( tf.argmax(output_1, axis=-1), tf.argmax(y_1, axis=-1)), tf.float32), name="accuracy_1") accuracy_2 = tf.reduce_mean(tf.cast(tf.equal( tf.argmax(output_2, axis=-1), tf.argmax(y_2, axis=-1)), tf.float32), name="accuracy_2") accuracy = tf.divide(accuracy_1 + accuracy_2, 2.0, name="accuracy") with tf.variable_scope("train"): global_step = tf.get_variable("global_step", shape=(), dtype=tf.int32, trainable=False) train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss_total, global_step=global_step) with tf.variable_scope("summary"): summary_loss_total = tf.summary.scalar("loss_total", loss_total)
tensorflow.divide
119
import tensorflow.contrib.layers as layers out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu) out = layers.flatten(out) with tf.variable_scope("action_value"): out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu) out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None) return out def simple_model(img_in, num_actions, scope, reuse=False, num_filters=64): with tf.variable_scope(scope, reuse=reuse): out = img_in gauss_initializer = initializers.xavier_initializer(uniform=False) # stddev = 1/n with tf.variable_scope("convnet"): out = layers.convolution2d( out, num_outputs=num_filters, kernel_size=8, stride=4, activation_fn=tf.nn.relu, weights_initializer=gauss_initializer, trainable=False) out = layers.flatten(out) with tf.variable_scope("action_value"): out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None) return out def simple_model_w_feat_eng(img_in, num_actions, scope, reuse=False): with tf.variable_scope(scope, reuse=reuse): out = img_in
tensorflow.contrib.layers.convolution2d
120
from tensorflow.python.ops import array_ops
      next_size = _next_array_size(new_size)
      next_shape = array_ops.pack([next_size] + fixed_shape)
      new_value = array_ops.zeros(next_shape, dtype=values.dtype)
      old_value = array.value()
      assign_op = state_ops.assign(array, new_value, validate_shape=False)
      with ops.control_dependencies([assign_op]):
        copy_op = array[:size].assign(old_value[:size])
      # return value needs to be the same dtype as no_op() for cond
      with ops.control_dependencies([copy_op]):
        return control_flow_ops.no_op()

    new_size = size + batch_size
    array_size = array_ops.shape_internal(array, optimize=False)[0]
    maybe_reallocate_op = control_flow_ops.cond(
        new_size > array_size, reallocate, control_flow_ops.no_op)
    with ops.control_dependencies([maybe_reallocate_op]):
      append_values_op = array[size:new_size].assign(batch_values)
      with ops.control_dependencies([append_values_op]):
        update_op = size.assign(new_size)

    if metrics_collections:
      ops.add_to_collections(metrics_collections, value)

    if updates_collections:
tensorflow.python.ops.array_ops.shape_internal
121
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature return self._infer_model(x=x, batch_size=batch_size, proba=True) def _check_inputs(self, features, targets): if self._features_info is not None: if not tensor_signature.tensors_compatible(features, self._features_info): raise ValueError('Features are incompatible with given information. ' 'Given features: %s, required signatures: %s.' % (str(features), str(self._features_info))) else: self._features_info = tensor_signature.create_signatures(features) if self._targets_info is not None: if not tensor_signature.tensors_compatible(targets, self._targets_info): raise ValueError('Targets are incompatible with given information. ' 'Given targets: %s, required signatures: %s.' % (str(targets), str(self._targets_info))) else: self._targets_info = tensor_signature.create_signatures(targets) def _train_model(self, input_fn, steps, feed_fn=None,
tensorflow.contrib.learn.python.learn.estimators.tensor_signature.tensors_compatible
122
from tensorflow.contrib.learn.python.learn.estimators import test_data bucketized_feature = feature_column.bucketized_column( cont_feature, test_data.get_quantile_based_buckets(iris.data, 10))
tensorflow.contrib.learn.python.learn.estimators.test_data.get_quantile_based_buckets
123
import tensorflow as tf internals = dict() for name in sorted(self.internals_memory): internals[name] = tf.gather(params=self.internals_memory[name], indices=indices) actions = dict()
tensorflow.gather
124
from tensorflow.python.ops import state_ops
        with ops.control_dependencies([v_diff]):  # run v_diff operation before scatter_add
            scaled_grad = scatter_add(vstar, indices, grad)
        var_update = state_ops.assign_sub(var, lr_t * (scaled_grad + gold))

        return control_flow_ops.group(*[var_update, ])

    def _apply_sparse(self, grad, var):
        # sparse grad (only for the shakespeare model)
        return self._apply_sparse_shared(
            grad.values, var, grad.indices,
            lambda x, i, v: state_ops.scatter_add(x, i, v))

    def set_params(self, cog, avg_gradient, client):
        with client.model.graph.as_default():
            all_vars = tf.trainable_variables()
            for variable, value in zip(all_vars, cog):
                vstar = self.get_slot(variable, "vstar")
                vstar.load(value, client.model.sess)
tensorflow.python.ops.state_ops.scatter_add
125
import tensorflow as tf eps: a constant to set upper or lower limit for labels, smoothening factor name: Optional scope/name for op_scope. Returns: A tensor with the log loss. """ with tf.name_scope(name): predictions.get_shape().assert_is_compatible_with(labels.get_shape()) predictions = tf.to_float(predictions) labels = tf.to_float(labels) losses = -tf.multiply(labels, tf.log(predictions + eps)) - tf.multiply( (1 - labels), tf.log(1 - predictions + eps)) return tf.losses.compute_weighted_loss(losses, weights) def kappa_loss(predictions, labels, y_pow=1, eps=1e-15, num_ratings=5, batch_size=32, name='kappa'):
tensorflow.to_float
126
import tensorflow as tf
    ####################################
    # Utils
    ####################################

    def _do_cutout(self, image, im_width, im_height, cutout_size):
        mask = tf.ones([cutout_size, cutout_size], dtype=tf.int32)
        start_x = tf.random.uniform(shape=(1,), minval=0, maxval=im_width, dtype=tf.int32)
        start_y = tf.random.uniform(shape=(1,), minval=0, maxval=im_height, dtype=tf.int32)
        mask = tf.pad(mask, [[cutout_size + start_y[0], im_height - start_y[0]],
                             [cutout_size + start_x[0], im_width - start_x[0]]])
        mask = mask[cutout_size: cutout_size + im_height, cutout_size: cutout_size + im_width]
        mask = tf.tile(tf.reshape(mask, (im_height, im_width, 1)), (1, 1, 3))
        image = tf.where(tf.equal(mask, 0), x=image, y=tf.zeros_like(image))
        return image

    def _add_drop_path(self, X, keep_prob):
tensorflow.pad
127
import tensorflow.contrib.graph_editor as ge for t in ts: b = set(ge.get_backward_walk_ops(t.op, inclusive=True, within_ops=fwd_ops)) f = set(ge.get_forward_walk_ops(t.op, inclusive=False, within_ops=fwd_ops)) # check that there are not shortcuts
tensorflow.contrib.graph_editor.get_forward_walk_ops
128
from tensorflow.python.ops import math_ops def _predictions_streaming_mean(predictions, unused_labels, weights=None): return metric_ops.streaming_mean(predictions, weights=weights) def _streaming_auc(predictions, labels, weights=None): return metric_ops.streaming_auc( predictions, labels, weights=_float_weights_or_none(weights)) def _accuracy_at_threshold(threshold): def _accuracy_metric(predictions, labels, weights=None): threshold_predictions = math_ops.to_float( math_ops.greater_equal(predictions, threshold)) return metric_ops.streaming_accuracy( predictions=threshold_predictions, labels=labels, weights=weights) return _accuracy_metric def _streaming_at_threshold(streaming_metrics_fn, threshold): def _streaming_metrics(predictions, labels, weights=None): precision_tensor, update_op = streaming_metrics_fn( predictions, labels=labels, thresholds=[threshold],
tensorflow.python.ops.math_ops.greater_equal
129
import tensorflow as tf cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_labels, logits=logits)) Focal_loss = tf.reduce_mean(focal_loss(one_hot_labels, logits, alpha=0.5)) l2_loss = weight_decay * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()]) Center_loss, Centers = center_loss(feat, tf.cast(label, dtype=tf.int32), 0.95, class_num) Total_loss = cost + l2_loss optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum, use_nesterov=True) # Batch norm requires update_ops to be added as a train_op dependency. update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): train_op = optimizer.minimize(Total_loss) correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_labels, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # val_dir = '/data0/AIChallenger/ai_challenger_scene_validation_20170908/scene_validation_images_20170908/' # annotations = '/data0/AIChallenger/ai_challenger_scene_validation_20170908/scene_validation_annotations_20170908.json' # # a DataFlow you implement to produce [tensor1, tensor2, ..] lists from whatever sources:
tensorflow.control_dependencies
130
import tensorflow as tf tf.float32) h = tf.zeros([config.num_layers, self.batch_size, config.hidden_size], tf.float32) self._initial_state = (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),) outputs, h, c = self._cell(inputs, h, c, self._rnn_params, is_training) outputs = tf.transpose(outputs, [1, 0, 2]) outputs = tf.reshape(outputs, [-1, config.hidden_size]) return outputs, (tf.contrib.rnn.LSTMStateTuple(h=h, c=c),) def _get_lstm_cell(self, config, is_training): #if config.rnn_mode == BASIC: # return tf.contrib.rnn.BasicLSTMCell( # config.hidden_size, forget_bias=0.0, state_is_tuple=True, # reuse=not is_training)
tensorflow.contrib.rnn.LSTMStateTuple
131
from tensorflow.python.ops import array_ops input_c = variables.Variable( array_ops.ones([num_layers, batch_size, num_units])) params = variables.Variable( array_ops.ones([params_size_t]), validate_shape=False) output, output_h, output_c = model( is_training=True,
tensorflow.python.ops.array_ops.ones
132
import tensorflow as tf tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": tf.FixedLenFeature([seq_length], tf.int64), "input_mask": tf.FixedLenFeature([seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), "label_ids": tf.FixedLenFeature([seq_length], tf.int64), "is_real_example": tf.FixedLenFeature([1], tf.int64), } def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t)
tensorflow.FixedLenFeature
133
import tensorflow as tf xent = -tf.reduce_sum(one_hot_spare_rep * tf.log(word_probs), axis=-1) # [batch_size, max_dec_steps]
tensorflow.log
134
import tensorflow as tf def initialize_tf_vars(self): """Initialize all uninitialized variables in session.""" with tf.name_scope('initialize_tf_vars'): uninited_set = [ e.decode() for e in self.sess.run(tf.report_uninitialized_variables()) ] self.sess.run( tf.variables_initializer([ v for v in tf.global_variables() if v.name.split(':')[0] in uninited_set
tensorflow.report_uninitialized_variables
135
import tensorflow as tf order_m = tf.convert_to_tensor(value=order_m) x = tf.convert_to_tensor(value=x) pmm = _evaluate_legendre_polynomial_pmm_eval(order_m, x) return tf.where( tf.equal(degree_l, order_m), pmm, _evaluate_legendre_polynomial_branch(degree_l, order_m, x, pmm)) def _spherical_harmonics_normalization(l, m, var_type=tf.float64): l = tf.cast(l, dtype=var_type) m = tf.cast(m, dtype=var_type) numerator = (2.0 * l + 1.0) * factorial(l - tf.abs(m)) denominator = 4.0 * np.pi * factorial(l + tf.abs(m)) return tf.sqrt(numerator / denominator) def _evaluate_spherical_harmonics_branch(degree, order, theta, phi, sign_order, var_type=tf.float64): sqrt_2 = tf.constant(1.41421356237, dtype=var_type) order_float = tf.cast(order, dtype=var_type) tmp = sqrt_2 * _spherical_harmonics_normalization(
tensorflow.abs
136
import tensorflow as tf Returns: A namedtuple of input and target data. """ input_text = tf.map_fn(lambda x: x[:-1], chunk) target_text = tf.map_fn(lambda x: x[1:], chunk) return (input_text, target_text)
tensorflow.map_fn
137
from tensorflow.python.ops import math_ops variance = sq_mean - math_ops.square(mean) std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
tensorflow.python.ops.math_ops.maximum
138
import tensorflow as tf background_indicator = tf.logical_or(negative_matches, ignored_matches)
tensorflow.logical_or
139
import tensorflow as tf hparams.video_num_input_frames = video_num_input_frames hparams.video_num_target_frames = video_num_target_frames else: return video_num_input_frames, video_num_target_frames @gin.configurable(module='trax.data', denylist=['dataset', 'training']) def bair_robot_pushing_preprocess(dataset, training): """Pre-processing function that concatenates input and target frames.""" del training def concat_and_add_mask(features, targets): """Concatenate input and output frames to form a language modeling setup.""" inp = features['inputs'] concat = tf.concat([inp, targets], axis=0) mask = tf.concat([tf.zeros_like(inp), tf.ones_like(targets)], axis=0) concat = tf.reshape(concat, (-1,)) mask = tf.reshape(mask, (-1,)) concat = tf.cast(concat, tf.int32) mask = tf.cast(mask, tf.float32) features['inputs'] = features['targets'] = concat features['mask'] = mask return features, concat dataset = dataset.map(concat_and_add_mask) return dataset def sentencepiece_tokenize(stream, spm_path=None, extra_ids=0): """Sentencepiece tokenization."""
tensorflow.zeros_like
140
from tensorflow.python.layers import pooling as pooling_layers """Construct a max pooling layer.""" if input_layer is None: input_layer = self.top_layer else: self.top_size = num_channels_in name = 'mpool' + str(self.counts['mpool']) self.counts['mpool'] += 1 pool = pooling_layers.max_pooling2d( input_layer, [k_height, k_width], [d_height, d_width], padding=mode, data_format=self.channel_pos, name=name) self.top_layer = pool return pool
tensorflow.python.layers.pooling.max_pooling2d
141
import tensorflow as tf

def resnet_model_fn(inputs, training):
    """Our model_fn for ResNet to be used with our Estimator."""
    network = resnet_model.imagenet_resnet_v2(
        resnet_size=18, num_classes=class_num, mode='se', data_format=None)
    inputs= network(inputs=inputs, is_training=training)
    feat = tf.nn.l2_normalize(inputs, 1, 1e-10, name='feat')
    inputs = tf.layers.dense(inputs=inputs, units=class_num)
    # inputs = tf.layers.dense(inputs=feat, units=class_num)
    inputs = tf.identity(inputs, 'final_dense')
    return inputs, feat

# image_size = 32, img_channels = 3, class_num = 10 in cifar10
x = tf.placeholder(tf.float32, shape=[None, image_size, image_size, img_channels])
label = tf.placeholder(tf.float32, shape=[None,])
one_hot_labels = tf.one_hot(indices=tf.cast(label, tf.int32), depth=class_num)
training_flag = tf.placeholder(tf.bool)
tensorflow.identity
142
import tensorflow as tf
        self.loss1 = tf.reduce_mean(tf.abs(self.Y_hat - self.Y))
        self.loss2 = tf.reduce_mean(tf.abs(self.Z_hat - self.Z))
        self.loss = self.loss1 + self.loss2
        self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss)

# In[3]:

tf.reset_default_graph()
sess = tf.InteractiveSession()

size_layers = 128
learning_rate = 1e-3
num_layers = 2

model = Model(num_layers, size_layers, learning_rate)
sess.run(tf.global_variables_initializer())
tensorflow.InteractiveSession
143
import tensorflow as tf x = tf.zeros([1000000], dtype=np.float32) y = tf.py_func(lambda x: x + 1, [x], [tf.float32])
tensorflow.py_func
144
import tensorflow as tf "spm_model": self._args["spm_model"], "languages": self._args["languages"], "with_src_lang_tag": self._with_src_lang_tag, "trg_lang_tag_position": self._trg_lang_tag_position, } def inputs_signature(self, mode): """ Returns the input dtypes and signatures. """ dtypes = {"feature": tf.int64, "src_lang": tf.int64, "trg_lang": tf.int64} signatures = {"feature": tf.TensorShape([None, None]), "src_lang": tf.TensorShape([None, ]), "trg_lang": tf.TensorShape([None, ])} if mode == compat.ModeKeys.INFER: return dtypes, signatures dtypes["label"] = tf.int64 signatures["label"] = tf.TensorShape([None, None]) return dtypes, signatures def build_model(self, args, name=None): """ Builds and return a keras model. """ model = build_model(args, self._multilingual_dp.meta, self._multilingual_dp.meta, name=name)
tensorflow.TensorShape
145
import tensorflow as tf y0 = tf.to_int32(tf.floor(y)) y1 = y0 + 1 z0 = tf.to_int32(tf.floor(z)) z1 = z0 + 1 x0_clip = tf.clip_by_value(x0, zero, max_x) x1_clip = tf.clip_by_value(x1, zero, max_x) y0_clip = tf.clip_by_value(y0, zero, max_y) y1_clip = tf.clip_by_value(y1, zero, max_y) z0_clip = tf.clip_by_value(z0, zero, max_z) z1_clip = tf.clip_by_value(z1, zero, max_z) dim3 = width dim2 = width * height dim1 = width * height * depth
tensorflow.clip_by_value
146
import tensorflow as tf
    )

    # Extract current batch size (in case this is a partial batch).
    cur_batch_size = dynamic_image_shape[0]

    # Get static shape of image.
    # shape = (3,)
    static_image_shape = params["generator_projection_dims"]
    print_obj(
        "minibatch_stddev", "static_image_shape", static_image_shape
    )

    # cur_batch_size must be divisible by or smaller than group_size.
    divisbility_condition = tf.equal(
        x=tf.mod(x=cur_batch_size, y=group_size),
        y=0,
        name="divisbility_condition"
    )

    less_than_condition = tf.less(
        x=cur_batch_size, y=group_size, name="less_than_condition"
    )

    any_condition = tf.reduce_any(
        input_tensor=[divisbility_condition, less_than_condition],
        name="any_condition"
    )
tensorflow.mod
147
import tensorflow as tf class batch_norm(object): def __init__(self, epsilon=1e-5, momentum = 0.9, name="batch_norm"): with tf.variable_scope(name): self.epsilon = epsilon self.momentum = momentum self.name = name def __call__(self, x): return tf.contrib.layers.batch_norm(x, decay=self.momentum, updates_collections=None, epsilon=self.epsilon, scale=True, is_training=tftrain, scope=self.name) def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
tensorflow.contrib.layers.batch_norm
148
import tensorflow as tf
            # layers
            self.value_estimate = tf.layers.dense(self.state, 1, kernel_initializer=w_init, name='v')  # estimated value for state

            # loss and optimizer
            self.loss = tf.squared_difference(self.value_estimate, self.target)
            self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            self.train_op = self.optimizer.minimize(
                self.loss, global_step=tf.contrib.framework.get_global_step())
tensorflow.squared_difference
149
from tensorflow.python.ops import gen_resource_variable_ops @property def op(self): return self.get().op def _read_variable_op(self): if _enclosing_tpu_context() is None: return self._primary_var.read_value() v = gen_resource_variable_ops.read_variable_op(self.handle, self._dtype) return v def read_value(self): return self._read_variable_op() def assign(self, value, use_locking=None, name=None, read_value=False): del use_locking
tensorflow.python.ops.gen_resource_variable_ops.read_variable_op
150
import tensorflow as tf items: the list of items to decode. These must be a subset of the item keys in self._items_to_handlers. If `items` is left as None, then all of the items in self._items_to_handlers are decoded. Returns: the decoded items, a list of tensor. """ context, sequence = tf.parse_single_sequence_example( serialized_example, self._context_keys_to_features, self._sequence_keys_to_features) tokens_raw = sequence[self.tokens_feature_name] tokens = tf.string_split(tokens_raw, delimiter=self.delimiter).values
tensorflow.parse_single_sequence_example
151
from tensorflow.contrib import metrics as metrics_lib predictions = math_ops.sigmoid(logits) result["eval_auc"] = metrics_lib.streaming_auc(predictions, targets)
tensorflow.contrib.metrics.streaming_auc
152
from tensorflow.python.ops import variables def _BenchmarkOp(self, op, desc): burn_in_steps = 10 benchmark_steps = 40 with session.Session() as sess: sess.run(variables.global_variables_initializer()) for i in xrange(burn_in_steps + benchmark_steps): if i == burn_in_steps: start_time = time.time() sess.run(op)
tensorflow.python.ops.variables.global_variables_initializer
153
import tensorflow as tf Z2 = tf.matmul(Z, A[:,:,n_basis//2:,:])/tf.sqrt(n_basis*.5) # Compute u_{h+1} and v_{h+1} U, V = tf.cos(Z1)+tf.cos(Z2), tf.sin(Z1)+tf.sin(Z2) Z = tf.concat([U, V], 3)/tf.sqrt(n_out*1.) KL += tf.reduce_mean(alpha_std**2+alpha_mean**2-2*alpha_logstd-1)/2. # Output layer
tensorflow.sqrt
154
import tensorflow as tf
        # Global step
        with tf.variable_scope('training_step', reuse=tf.AUTO_REUSE):
            global_step = tf.get_variable("global_step", [], dtype=tf.int32,
                                          initializer=tf.constant_initializer(0), trainable=False)
        # Loss value
        reg_item = tf.contrib.layers.l1_l2_regularizer(L1_reg, L2_reg)
        reg_term = tf.contrib.layers.apply_regularization(reg_item, self.nnweights)
        loss_fun = self._negative_log_likelihood(y_, y)
        loss = loss_fun + reg_term
        # SGD Optimizer
        if optimizer == 'sgd':
tensorflow.contrib.layers.l1_l2_regularizer
155
from tensorflow.contrib.layers.python.layers import utils update_second_moment_op = moving_averages.assign_moving_average( variable=self._moving_second_moment, value=second_moment, decay=self._decay_rate, name="update_moving_second_moment").op return update_mean_op, update_second_moment_op def build_no_ops(): return (tf.no_op(), tf.no_op()) # Only make the ops if we know that `is_training=True`, or the value of # `is_training` is unknown. is_training_const = utils.constant_value(is_training) if is_training_const is None or is_training_const: update_mean_op, update_second_moment_op = utils.smart_cond( is_training, build_update_ops, build_no_ops, ) # Every new connection creates a new op which adds its contribution # to the running average when ran. tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mean_op) tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_second_moment_op)
tensorflow.contrib.layers.python.layers.utils.constant_value
156
import tensorflow as tf # 0 <= z < depth, 0 <= y < height & 0 <= x < width. max_z = tf.to_int32(tf.shape(im)[1] - 1) max_y = tf.to_int32(tf.shape(im)[2] - 1) max_x = tf.to_int32(tf.shape(im)[3] - 1) # Converts scale indices from [-1, 1] to [0, width/height/depth]. x = (x + 1.0) * (width_f) / 2.0 y = (y + 1.0) * (height_f) / 2.0 z = (z + 1.0) * (depth_f) / 2.0 x0 = tf.to_int32(tf.floor(x)) x1 = x0 + 1 y0 = tf.to_int32(tf.floor(y)) y1 = y0 + 1 z0 = tf.to_int32(tf.floor(z)) z1 = z0 + 1 x0_clip = tf.clip_by_value(x0, zero, max_x) x1_clip = tf.clip_by_value(x1, zero, max_x) y0_clip = tf.clip_by_value(y0, zero, max_y) y1_clip = tf.clip_by_value(y1, zero, max_y) z0_clip = tf.clip_by_value(z0, zero, max_z) z1_clip = tf.clip_by_value(z1, zero, max_z) dim3 = width dim2 = width * height dim1 = width * height * depth base = _repeat( tf.range(num_batch) * dim1, out_depth * out_height * out_width)
tensorflow.floor
157
from tensorflow.contrib.slim.python.slim.data import test_utils data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1)
tensorflow.contrib.slim.python.slim.data.test_utils.create_tfrecord_files
158
import tensorflow as tf return tf.random_normal_initializer(0.0, params.initializer_gain) elif params.initializer == "normal_unit_scaling": return tf.variance_scaling_initializer(params.initializer_gain, mode="fan_avg", distribution="normal") elif params.initializer == "uniform_unit_scaling": return tf.variance_scaling_initializer(params.initializer_gain, mode="fan_avg", distribution="uniform") else: raise ValueError("Unrecognized initializer: %s" % params.initializer)
tensorflow.variance_scaling_initializer
159
import tensorflow as tf self.lm_scaling = tf.get_variable("lm_scaling", [], initializer=tf.constant_initializer(1.0)) flattened_lm_emb = tf.reshape(lm_emb, [num_sentences * max_sentence_length * lm_emb_size, lm_num_layers]) flattened_aggregated_lm_emb = tf.matmul(flattened_lm_emb, tf.expand_dims(self.lm_weights, 1)) # [num_sentences * max_sentence_length * emb, 1] aggregated_lm_emb = tf.reshape(flattened_aggregated_lm_emb, [num_sentences, max_sentence_length, lm_emb_size]) aggregated_lm_emb *= self.lm_scaling context_emb_list.append(aggregated_lm_emb) context_emb = tf.concat(context_emb_list, 2) # [num_sentences, max_sentence_length, emb] head_emb = tf.concat(head_emb_list, 2) # [num_sentences, max_sentence_length, emb] context_emb = tf.nn.dropout(context_emb, self.lexical_dropout) # [num_sentences, max_sentence_length, emb] head_emb = tf.nn.dropout(head_emb, self.lexical_dropout) # [num_sentences, max_sentence_length, emb] text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length) # [num_sentence, max_sentence_length] context_outputs = self.lstm_contextualize(context_emb, text_len, text_len_mask) # [num_words, emb] num_words = util.shape(context_outputs, 0) genre_emb = tf.gather(tf.get_variable("genre_embeddings", [len(self.genres), self.config["feature_size"]]), genre) # [emb] sentence_indices = tf.tile(tf.expand_dims(tf.range(num_sentences), 1), [1, max_sentence_length]) # [num_sentences, max_sentence_length] flattened_sentence_indices = self.flatten_emb_by_sentence(sentence_indices, text_len_mask) # [num_words] flattened_head_emb = self.flatten_emb_by_sentence(head_emb, text_len_mask) # [num_words] candidate_starts = tf.tile(tf.expand_dims(tf.range(num_words), 1), [1, self.max_span_width]) # [num_words, max_span_width]
tensorflow.sequence_mask
160
from tensorflow.contrib.learn.python.learn.datasets import base linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1), dnn_feature_columns=(cont_feature,), dnn_hidden_units=(3, 3), dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1)) input_fn = test_data.iris_input_logistic_fn metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate( input_fn=input_fn, steps=100) self._assertSingleClassMetrics(metrics) def benchmarkMultiClass(self): iris = base.load_iris() cont_feature = feature_column.real_valued_column('feature', dimension=4) bucketized_feature = feature_column.bucketized_column( cont_feature, test_data.get_quantile_based_buckets(iris.data, 10)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( n_classes=3, linear_feature_columns=(bucketized_feature,), dnn_feature_columns=(cont_feature,), dnn_hidden_units=(3, 3))
tensorflow.contrib.learn.python.learn.datasets.base.load_iris
161
from tensorflow.python.ops import array_ops
  #     term from the formula above.
  # `relevant_precision_per_k` (float64) - Relevant precisions; i.e.,
  #     precisions at all k for which relevance indicator is true.
  relevant_per_k = _sparse_true_positive_at_k(
      predictions_idx_per_k, labels_per_k, name='relevant_per_k')
  tp_per_k = math_ops.cumsum(relevant_per_k, axis=-1, name='tp_per_k')
  retrieved_per_k = math_ops.cumsum(
      array_ops.ones_like(relevant_per_k), axis=-1, name='retrieved_per_k')
  precision_per_k = math_ops.div(
      math_ops.to_double(tp_per_k),
      math_ops.to_double(retrieved_per_k),
      name='precision_per_k')
  relevant_precision_per_k = math_ops.mul(
      precision_per_k,
      math_ops.to_double(relevant_per_k),
      name='relevant_precision_per_k')
tensorflow.python.ops.array_ops.ones_like
162
from tensorflow.python.framework import function
    self._testGraphExtensionRestore()

  def testStrippedOpListDef(self):
    with self.test_session():
      # Creates a graph.
      v0 = tf.Variable(0.0)
      var = tf.Variable(10.0)
      tf.add(v0, var)

      @function.Defun(x=tf.float32)
      def minus_one(x):
        return x - 1
      minus_one(tf.identity(v0))
      save = tf.train.Saver({"v0": v0})
      tf.initialize_all_variables()

      # Generates MetaGraphDef.
tensorflow.python.framework.function.Defun
163
import tensorflow as tf if self._is_training: self._train_op = tf.get_collection_ref("train_op")[0] self._lr = tf.get_collection_ref("lr")[0] self._new_lr = tf.get_collection_ref("new_lr")[0] self._lr_update = tf.get_collection_ref("lr_update")[0] rnn_params = tf.get_collection_ref("rnn_params") if self._cell and rnn_params: params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable( self._cell, self._cell.params_to_canonical, self._cell.canonical_to_params, rnn_params, base_variable_scope="Model/RNN") tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, params_saveable)
tensorflow.contrib.cudnn_rnn.RNNParamsSaveable
164
from tensorflow.python.ops import math_ops array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]), thresh_tiled) pred_is_neg = math_ops.logical_not(pred_is_pos) # Tile labels by number of thresholds label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1]) label_is_neg = math_ops.logical_not(label_is_pos) true_positives = _create_local('true_positives', shape=[num_thresholds]) false_negatives = _create_local('false_negatives', shape=[num_thresholds]) true_negatives = _create_local('true_negatives', shape=[num_thresholds]) false_positives = _create_local('false_positives', shape=[num_thresholds])
tensorflow.python.ops.math_ops.logical_not
165
from tensorflow.python.framework import ops name: A name for the operation (optional). Returns: A Tensor with the same type as `x` if `x.dtype != qint32` otherwise the return type is `quint8`. """ with ops.op_scope([x], name, "Sigmoid") as name: x = ops.convert_to_tensor(x, name="x") return gen_math_ops._sigmoid(x, name=name) def tanh(x, name=None):
tensorflow.python.framework.ops.op_scope
166
from tensorflow.python.training import adagrad input_fn=_input_fn, steps=100) self._assertSingleClassMetrics(metrics) def benchmarkCustomOptimizer(self): iris = test_data.prepare_iris_data_for_logistic_regression() cont_feature = feature_column.real_valued_column('feature', dimension=4) bucketized_feature = feature_column.bucketized_column( cont_feature, test_data.get_quantile_based_buckets(iris.data, 10)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( model_dir=tempfile.mkdtemp(), linear_feature_columns=(bucketized_feature,), linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1), dnn_feature_columns=(cont_feature,), dnn_hidden_units=(3, 3), dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1)) input_fn = test_data.iris_input_logistic_fn metrics = classifier.fit(input_fn=input_fn, steps=_ITERS).evaluate( input_fn=input_fn, steps=100) self._assertSingleClassMetrics(metrics) def benchmarkMultiClass(self): iris = base.load_iris() cont_feature = feature_column.real_valued_column('feature', dimension=4) bucketized_feature = feature_column.bucketized_column( cont_feature, test_data.get_quantile_based_buckets(iris.data, 10)) classifier = dnn_linear_combined.DNNLinearCombinedClassifier( n_classes=3,
tensorflow.python.training.adagrad.AdagradOptimizer
167
import tensorflow as tf
            # tf.matrix_diag(tf.trace(tf.matmul(Li_eKuffu_Lit, cov))) +
            tf.einsum("ig,nij,jh->ngh", q_mu, Li_eKuffu_Lit, q_mu) -
            # tf.matmul(q_mu, tf.matmul(Li_eKuffu_Lit, q_mu), transpose_a=True) -
            fmean[:, :, None] * fmean[:, None, :] +
            e_related_to_mean
        )
    else:
        fvar = (
            (eKff - tf.trace(Li_eKuffu_Lit))[:, None] +
            tf.einsum("nij,dji->nd", Li_eKuffu_Lit, cov) +
            tf.einsum("ig,nij,jg->ng", q_mu, Li_eKuffu_Lit, q_mu) -
            fmean ** 2 +
            tf.matrix_diag_part(e_related_to_mean)
        )

    return fmean, fvar


# ---------------------------------------------------------------
########################## HELPERS ##############################
tensorflow.einsum
168
import tensorflow as tf dtype=tf.float32, initializer=tf.constant_initializer(0), trainable=False) label = tf.reshape(label, [-1]) centers_batch = tf.gather(centers, label) diff = (1 - alpha) * (centers_batch - features) centers = tf.scatter_sub(centers, label, diff) loss = tf.nn.l2_loss(features - centers_batch) return loss, centers def correlation_loss(source_samples, target_samples, weight, name='corr_loss'):
tensorflow.scatter_sub
169
import tensorflow as tf np.random.seed(127) num_elements = 10000 batch_size = 64 indices_batch = np.random.randint( batch_size, size=num_elements, dtype=np.int64) indices_value = np.arange(num_elements, dtype=np.int64) indices = np.asarray( sorted(zip(indices_batch, indices_value)), dtype=np.int64) values = ["feature_value_for_embedding_lookup"] * num_elements shape = np.asarray([batch_size, num_elements], dtype=np.int64) with tf.Session() as sess: with tf.device("/cpu:0"): indices = tf.Variable(indices) values = tf.Variable(values) shape = tf.Variable(shape) st = tf.SparseTensor(indices, values, shape) st_handles = add_many_sparse_to_tensors_map(st) st_roundtrip = take_many_sparse_from_tensors_map( sparse_map_op=st_handles.op, sparse_handles=st_handles) st_roundtrip_op = st_roundtrip.values.op st_serialized = tf.serialize_many_sparse(st) st_deserialized = tf.deserialize_many_sparse( st_serialized, dtype=values.dtype) st_deserialized_op = st_deserialized.values.op tf.global_variables_initializer().run() st_roundtrip_values = sess.run(st_roundtrip)
tensorflow.SparseTensor
170
import tensorflow as tf
      labels = tf.random_uniform(
          [batch_size],
          minval=1,
          maxval=nclass,
          dtype=tf.int32,
          name='synthetic_labels')
      # Note: This results in a H2D copy, but no computation
      # Note: This avoids recomputation of the random values, but still
      # results in a H2D copy.
      images = tf.contrib.framework.local_variable(images, name='images')
      labels = tf.contrib.framework.local_variable(labels, name='labels')
      # Change to 0-based (don't use background class like Inception does)
      labels -= 1
    if num_compute_devices == 1:
      images_splits = [images]
      labels_splits = [labels]
    else:
      images_splits = tf.split(images, num_compute_devices, 0)
tensorflow.contrib.framework.local_variable
171
import tensorflow as tf x_t_len = tf.strings.length(x_t) x_t = tf.string_split([x_t], delimiter='').values z_t = tf.gather(y, m) z_t_len = tf.strings.length(z_t) z_t = tf.string_split([z_t], delimiter='').values for i in tf.range(start=0, limit=x_t_len - self._p + 1, delta=1, dtype=None, name='range'): u = tf.string_join(x_t[i:i + self._p], '') vx_keys, r = tf.cond( tf.greater(vx.lookup(u), -1), true_fn=lambda: (vx_keys, tf.add(vx.lookup(u), 1)), false_fn=lambda: (tf.concat([vx_keys, tf.reshape(u, (-1, 1))], axis=0), tf.constant(1, dtype=tf.int64, name='constant')) ) vx.insert(u, r)
tensorflow.string_join
172
from tensorflow.python.framework import ops ops.register_tensor_conversion_function(ReplicatedVariable, _tensor_conversion) if not TF_23: ops.register_dense_tensor_like_type(ReplicatedVariable)
tensorflow.python.framework.ops.register_dense_tensor_like_type
173
from tensorflow.python.training import server_lib "worker": ["localhost:%s" % port1], "ps": ["localhost:%s" % port2] }) worker = server_lib.Server(cs, job_name="worker", start=True) ps = server_lib.Server(cs, job_name="ps", start=True) return worker, ps @contextlib.contextmanager
tensorflow.python.training.server_lib.Server
174
import tensorflow as tf def bilstm_layer(self, embeddings, nwords): t = tf.transpose(embeddings, perm=[1, 0, 2]) lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(self.params['lstm_size']) lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(self.params['lstm_size']) lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw) output_fw, _ = lstm_cell_fw(t, dtype=tf.float32, sequence_length=nwords)
tensorflow.contrib.rnn.LSTMBlockFusedCell
175
from tensorflow.python.framework import ops Raises: ValueError: if k is invalid. """ if k < 1: raise ValueError('Invalid k=%s.' % k) with ops.name_scope( None, 'average_precision', (predictions, labels, k)) as scope: # Calculate top k indices to produce [D1, ... DN, k] tensor. _, predictions_idx = nn.top_k(predictions, k) predictions_idx = math_ops.to_int64(predictions_idx, name='predictions_idx')
tensorflow.python.framework.ops.name_scope
176
import tensorflow as tf weights[CLUSTER_CENTROIDS].assign( weights[CLUSTERING_IMPL].cluster_centroids ) # Insert clustering variables weights[PULLING_INDICES].assign(tf.dtypes.cast( weights[CLUSTERING_IMPL].get_pulling_indices( weights[ORIGINAL_WEIGHTS]), weights[PULLING_INDICES].dtype )) output = weights[CLUSTERING_IMPL].get_clustered_weight( weights[PULLING_INDICES], weights[ORIGINAL_WEIGHTS]) inputs.assign(output) else: if self.preserve_sparsity: inputs = tf.multiply(inputs, weights[SPARSITY_MASK]) output = inputs else: output = inputs return quant_ops.LastValueQuantize( output, weights['min_var'], weights['max_var'], is_training=training, num_bits=self.num_bits, per_channel=self.per_axis, symmetric=self.symmetric, narrow_range=self.narrow_range )
tensorflow.multiply
177
import tensorflow as tf with tf.Session("", graph=tf.Graph()) as sess: one = tf.Variable(1.0) twos = tf.Variable([2.0, 2.0, 2.0]) init = tf.initialize_all_variables() save = tf.train.Saver(tf.all_variables()) init.run() save.save(sess, save_path)
tensorflow.all_variables
178
from tensorflow.python.ops import gen_math_ops
      #
      # Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
      # allows some conversions that cast() can't do, e.g. casting numbers to
      # strings.
      x = ops.convert_to_tensor(x, name="x")
      if x.dtype.base_dtype == dtype:
        return x
      return gen_math_ops.cast(x, dtype, name=name)


def to_float(x, name="ToFloat"):
  """Casts a tensor to type `float32`.

  Args:
tensorflow.python.ops.gen_math_ops.cast
179
from tensorflow.python.ops import gradients_impl output, output_h, output_c = model( is_training=True, input_data=input_data, input_h=input_h, input_c=input_c, params=params) all_grads = gradients_impl.gradients( [output, output_h, output_c], [params, input_data, input_h, input_c]) training_op = control_flow_ops.group(*all_grads) self._BenchmarkOp(training_op, "cudnn_lstm %s %s" % (config_name, self._GetConfigDesc(config)))
tensorflow.python.ops.gradients_impl.gradients
180
import tensorflow as tf y0_f = tf.to_float(y0) y1_f = tf.to_float(y1) z0_f = tf.to_float(z0) z1_f = tf.to_float(z1) # Check the out-of-boundary case. x0_valid = tf.to_float( tf.less_equal(x0, max_x) & tf.greater_equal(x0, 0)) x1_valid = tf.to_float( tf.less_equal(x1, max_x) & tf.greater_equal(x1, 0)) y0_valid = tf.to_float( tf.less_equal(y0, max_y) & tf.greater_equal(y0, 0)) y1_valid = tf.to_float( tf.less_equal(y1, max_y) & tf.greater_equal(y1, 0)) z0_valid = tf.to_float( tf.less_equal(z0, max_z) & tf.greater_equal(z0, 0)) z1_valid = tf.to_float( tf.less_equal(z1, max_z) & tf.greater_equal(z1, 0)) w_z0_y0_x0 = tf.expand_dims(((x1_f - x) * (y1_f - y) * (z1_f - z) * x1_valid * y1_valid * z1_valid), 1) w_z0_y0_x1 = tf.expand_dims(((x - x0_f) * (y1_f - y) * (z1_f - z) * x0_valid * y1_valid * z1_valid), 1) w_z0_y1_x0 = tf.expand_dims(((x1_f - x) * (y - y0_f) * (z1_f - z) * x1_valid * y0_valid * z1_valid), 1) w_z0_y1_x1 = tf.expand_dims(((x - x0_f) * (y - y0_f) *
tensorflow.less_equal
181
from tensorflow.python.training import server_lib s.bind(("", 0)) port = s.getsockname()[1] s.close() return port port1 = get_open_port() port2 = get_open_port() cs = server_lib.ClusterSpec({ "worker": ["localhost:%s" % port1], "ps": ["localhost:%s" % port2] }) worker = server_lib.Server(cs, job_name="worker", start=True) ps = server_lib.Server(cs, job_name="ps", start=True)
tensorflow.python.training.server_lib.ClusterSpec
182
from tensorflow.contrib import metrics as contrib_metrics
              "eval_loss": loss,
          }
      elif task_name == "sts-b":
        def metric_fn(per_example_loss, label_ids, logits, is_real_example):
          """Compute Pearson correlations for STS-B."""
          # Display labels and predictions
          concat1 = contrib_metrics.streaming_concat(logits)
          concat2 = contrib_metrics.streaming_concat(label_ids)

          # Compute Pearson correlation
          pearson = contrib_metrics.streaming_pearson_correlation(
              logits, label_ids, weights=is_real_example)
tensorflow.contrib.metrics.streaming_concat
183
import tensorflow as tf b = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) v = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) with tf.name_scope('v'): # Applying fully connected layer with non-linear activation to each of the B*T timestamps; # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size tmp1 = tf.tensordot(facts, w1, axes=1) tmp2 = tf.tensordot(query, w2, axes=1) tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]]) tmp = tf.tanh((tmp1 + tmp2) + b) # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape key_masks = mask # [B, 1, T] # key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1) v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T] alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape #output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1) output = facts * tf.expand_dims(alphas, -1) output = tf.reshape(output, tf.shape(facts)) # output = output / (facts.get_shape().as_list()[-1] ** 0.5)
tensorflow.tensordot
184
import tensorflow as tf A tensor of shape (vec_dim) """ if reduction_mode == 'max': print('USING MAX POOLING FOR REDUCTION!') vecs_reduced = tf.segment_max(vecs, segment_inds) elif reduction_mode == 'mean': print('USING AVG POOLING FOR REDUCTION!') vecs_reduced = tf.segment_mean(vecs, segment_inds) vecs_reduced.set_shape([num_segments, vecs.get_shape()[1]]) return vecs_reduced
tensorflow.segment_mean
185
import tensorflow as tf zip(grads, tvars), global_step=tf.train.get_or_create_global_step()) self._new_lr = tf.placeholder( tf.float32, shape=[], name='new_learning_rate') self._lr_update = tf.assign(self._lr, self._new_lr) self.saver = tf.train.Saver(tf.global_variables()) def _get_lstm_cell(self, config, is_training): if config.rnn_mode == BASIC: return tf.contrib.rnn.BasicLSTMCell( config.hidden_size, forget_bias=0., state_is_tuple=True, reuse=not is_training) if config.rnn_mode == BLOCK: return tf.contrib.rnn.LSTMBlockCell( config.hidden_size, forget_bias=0.) raise ValueError('rnn_mode {} not supported'.format(config.rnn_mode)) def _build_rnn_graph(self, inputs, config, is_training): def make_cell():
tensorflow.contrib.rnn.BasicLSTMCell
186
import tensorflow as tf # if encoder.convolution_activation.lower() == 'relu': encoder_inputs_ = tf.nn.relu(encoder_inputs_) if encoder.maxout_stride: if encoder.binary: raise NotImplementedError stride = encoder.maxout_stride k = tf.to_int32(tf.ceil(time_steps / stride) * stride) - time_steps # TODO: simpler pad = tf.zeros([batch_size, k, tf.shape(encoder_inputs_)[2]]) encoder_inputs_ = tf.concat([encoder_inputs_, pad], axis=1) encoder_inputs_ = tf.nn.pool(encoder_inputs_, window_shape=[stride], pooling_type='MAX', padding='VALID', strides=[stride]) encoder_input_length_ = tf.to_int32(tf.ceil(encoder_input_length_ / stride)) if encoder.highway_layers: x = encoder_inputs_ for j in range(encoder.highway_layers): size = x.shape[2].value with tf.variable_scope('highway_{}'.format(j + 1)): g = tf.layers.dense(x, size, activation=tf.nn.sigmoid, use_bias=True, name='g') y = tf.layers.dense(x, size, activation=tf.nn.relu, use_bias=True, name='y') x = g * y + (1 - g) * x encoder_inputs_ = x
tensorflow.ceil
187
from tensorflow.python.ops import control_flow_ops
      # return value needs to be the same dtype as no_op() for cond
      with ops.control_dependencies([copy_op]):
        return control_flow_ops.no_op()

    new_size = size + batch_size
    array_size = array_ops.shape_internal(array, optimize=False)[0]
    maybe_reallocate_op = control_flow_ops.cond(
        new_size > array_size, reallocate, control_flow_ops.no_op)
    with ops.control_dependencies([maybe_reallocate_op]):
      append_values_op = array[size:new_size].assign(batch_values)
      with ops.control_dependencies([append_values_op]):
        update_op = size.assign(new_size)
tensorflow.python.ops.control_flow_ops.cond
188
import tensorflow as tf e_mean_Kuf = tf.reshape(e_mean_Kuf, [num_data, num_func, num_ind]) e_fmean_mean = tf.einsum("nqm,mz->nqz", e_mean_Kuf, Lit_q_mu) # N x D x D e_related_to_mean = e_fmean_mean + tf.matrix_transpose(e_fmean_mean) + e_mean_mean
tensorflow.matrix_transpose
189
import tensorflow as tf vx = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string, value_dtype=tf.int64, default_value=-1) vz = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string, value_dtype=tf.int64, default_value=-1) vx_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1))
tensorflow.contrib.lookup.MutableHashTable
190
import tensorflow as tf # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape #output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1) output = facts * tf.expand_dims(alphas, -1) output = tf.reshape(output, tf.shape(facts)) # output = output / (facts.get_shape().as_list()[-1] ** 0.5) if not return_alphas: return output else: return output, alphas def din_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False): if isinstance(facts, tuple): # In case of Bi-RNN, concatenate the forward and the backward RNN outputs. facts = tf.concat(facts, 2) print ("querry_size mismatch") query = tf.concat(values = [ query, query, ], axis=1) if time_major: # (T,B,D) => (B,T,D) facts = tf.array_ops.transpose(facts, [1, 0, 2]) mask = tf.equal(mask, tf.ones_like(mask)) facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer querry_size = query.get_shape().as_list()[-1] queries = tf.tile(query, [1, tf.shape(facts)[1]]) queries = tf.reshape(queries, tf.shape(facts)) din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1) d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
tensorflow.concat
191
from tensorflow.python.ops import math_ops tuple. """ predictions, labels = tensor_util.remove_squeezable_dimensions( predictions, labels) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) squared_error = math_ops.square(labels - predictions) return streaming_mean(squared_error, weights, metrics_collections, updates_collections, name or 'mean_squared_error')
tensorflow.python.ops.math_ops.square
192
import tensorflow as tf def _get_lstm_cell(self, config, is_training): if config.rnn_mode == BASIC: return tf.contrib.rnn.BasicLSTMCell( config.hidden_size, forget_bias=0.0, state_is_tuple=True, reuse=not is_training) if config.rnn_mode == BLOCK: return tf.contrib.rnn.LSTMBlockCell( config.hidden_size, forget_bias=0.0) raise ValueError("rnn_mode %s not supported" % config.rnn_mode) def _build_rnn_graph_lstm(self, inputs, config, is_training): """Build the inference graph using canonical LSTM cells."""
tensorflow.contrib.rnn.LSTMBlockCell
193
import tensorflow as tf
        _err_log = "SubpixelConv2d: The number of input channels == (scale x scale) x The number of output channels"
        if n_out_channels >= 1:
            if int(X.get_shape()[-1]) != (r**2) * n_out_channels:
                raise Exception(_err_log)

            # bsize, a, b, c = X.get_shape().as_list()
            # bsize = tf.shape(X)[0] # Handling Dimension(None) type for undefined batch dim
            # Xs=tf.split(X,r,3) #b*h*w*r*r
            # Xr=tf.concat(Xs,2) #b*h*(r*w)*r
            # X=tf.reshape(Xr,(bsize,r*a,r*b,n_out_channel)) # b*(r*h)*(r*w)*c
            X = tf.depth_to_space(X, r)
        else:
            raise RuntimeError(_err_log)
        return X


class SubpixelConv1d(Layer):
    """It is a 1D sub-pixel up-sampling layer.

    Calls a TensorFlow function that directly implements this functionality.
    We assume input has dim (batch, width, r)
tensorflow.depth_to_space
194
from tensorflow.python.ops import math_ops mean = moving_average("mean", log_norm, decay) sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay) variance = sq_mean - math_ops.square(mean) std = math_ops.sqrt(math_ops.maximum(epsilon, variance)) max_norms = math_ops.exp(mean + std_factor * std) return max_norms, mean def adaptive_clipping_fn(std_factor=2.,
tensorflow.python.ops.math_ops.exp
195
from tensorflow.python.lib.io import file_io model_name = "vgg19BNReLUmodel.h5" model.save(model_name) with file_io.FileIO(model_name, mode='rb') as input_f: with file_io.FileIO("gs://deeplearningteam11/" + model_name, mode='w+') as output_f: output_f.write(input_f.read())
tensorflow.python.lib.io.file_io.FileIO
196
import tensorflow.contrib as contrib scope="dropout2_2") fc3_1 = contrib.layers.fully_connected(dropout2_1, 32, scope="fc3_1") fc3_2 = contrib.layers.fully_connected(dropout2_2, 32, scope="fc3_2") if cross_stitch_enabled: with tf.variable_scope("cross_stitch_3"): stitch3_1, stitch3_2 = apply_cross_stitch(fc3_1, fc3_2) else: stitch3_1, stitch3_2 = fc3_1, fc3_2 dropout3_1 = contrib.layers.dropout(stitch3_1, keep_prob=keep_prob, is_training=is_training, scope="dropout3_1") dropout3_2 = contrib.layers.dropout(stitch3_2, keep_prob=keep_prob, is_training=is_training, scope="dropout3_2") output_1 = contrib.layers.fully_connected(dropout3_1, n_output_1, activation_fn=None, scope="output_1") output_2 = contrib.layers.fully_connected(dropout3_2, n_output_2, activation_fn=None, scope="output_2") with tf.variable_scope("loss"): loss_base_1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_1, logits=output_1)) loss_base_2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_2, logits=output_2)) reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) loss_total = loss_base_1 + loss_base_2 + tf.reduce_sum(reg_losses) with tf.variable_scope("evaluation"):
tensorflow.contrib.layers.dropout
197
import tensorflow as tf logstd = tf.get_variable( "logstd", mean.shape[2:], tf.float32, logstd_initializer) logstd = tf.tile( logstd[None, None], [tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2)) with tf.variable_scope("value"): x = flat_observations for size in config.value_layers: x = tf.layers.dense(x, size, activation=tf.nn.relu) value = tf.layers.dense(x, 1)[..., 0] mean = tf.check_numerics(mean, "mean") logstd = tf.check_numerics(logstd, "logstd") value = tf.check_numerics(value, "value") policy = tfp.distributions.MultivariateNormalDiag(mean, tf.exp(logstd)) return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2)) def clip_logits(logits, config):
tensorflow.check_numerics
198
from tensorflow.python.framework import tensor_shape logits, labels, name=name) return cost @ops.RegisterShape("SparseSoftmaxCrossEntropyWithLogits") def _SparseSoftmaxCrossEntropyWithLogitsShape(op): """Shape function for SparseSoftmaxCrossEntropyWithLogits op.""" logits_shape = op.inputs[0].get_shape() input_shape = logits_shape.with_rank(2) batch_size = input_shape[0] # labels_shape op.inputs[1].get_shape().merge_with(tensor_shape.vector(batch_size)) return [tensor_shape.vector(batch_size.value), input_shape] @ops.RegisterShape("SoftmaxCrossEntropyWithLogits") def _SoftmaxCrossEntropyWithLogitsShape(op): """Shape function for SoftmaxCrossEntropyWithLogits op.""" logits_shape = op.inputs[0].get_shape() labels_shape = op.inputs[1].get_shape() input_shape = logits_shape.merge_with(labels_shape).with_rank(2) batch_size = input_shape[0]
tensorflow.python.framework.tensor_shape.vector
199