seed
stringlengths
59
2.16k
seed_api
stringlengths
14
101
index
int64
0
523
import tensorflow as tf indices = helper.make_tensor("indices", TensorProto.INT64, [2, 2], x) a = helper.make_sparse_tensor(values, indices,[3, 4]) node_def = helper.make_node("Constant", [], ["Y"], sparse_value=a) output = run_node(node_def, []) b = tf.sparse_to_dense(output["Y"].indices, output["Y"].dense_shape, output["Y"].values) result = b.eval(session=tf.Session()) np.testing.assert_equal(result, expected)
tensorflow.sparse_to_dense
400
from tensorflow.python.platform import test def testSparseDistributed(self): worker, unused_ps = self._setupCluster() for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with session.Session(worker.target): var0, var1, update_op = self._setupSparse(True, dtype) self._assertSparseCorrect(var0, var1, update_op) if __name__ == "__main__": test.main()
tensorflow.python.platform.test.main
401
from tensorflow.python.ops import gen_nn_ops if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)): raise ValueError("output_shape must have shape (4,), got {}" .format(output_shape_.get_shape())) if isinstance(output_shape, (list, np.ndarray)): # output_shape's shape should be == [4] if reached this point. if not filter.get_shape()[2].is_compatible_with(output_shape[3]): raise ValueError( "output_shape does not match filter's output channels, " "{} != {}".format(output_shape[3], filter.get_shape()[2])) if padding != "VALID" and padding != "SAME": raise ValueError("padding must be either VALID or SAME:" " {}".format(padding)) return gen_nn_ops.conv2d_backprop_input(input_sizes=output_shape_, filter=filter, out_backprop=value, strides=strides, padding=padding, name=name) # pylint: disable=protected-access def bias_add(value, bias, data_format=None, name=None): """Adds `bias` to `value`. This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D. Broadcasting is supported, so `value` may have any number of dimensions. Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
tensorflow.python.ops.gen_nn_ops.conv2d_backprop_input
402
import tensorflow as tf # self._eval_image_summary('reco', actual) self._eval_image_summary('pred', expected) self._eval_image_summary('nois', noisy) def _eval_image_summary(self, name, encdoding_batch): summary = self.image_summaries[name].eval(feed_dict={self.encoding: encdoding_batch}) self.summary_writer.add_summary(summary, global_step=self.get_past_epochs()) def _add_decoding_summary(self, name, var, collection='train'): var = var[:FLAGS.visualiza_max] var = tf.concat(tf.unstack(var), axis=0) var = tf.expand_dims(var, dim=0) color_s = tf.summary.image(name, var[..., :3], max_outputs=FLAGS.visualiza_max) var = tf.expand_dims(var[..., 3], dim=3) bw_s = tf.summary.image('depth_' + name, var, max_outputs=FLAGS.visualiza_max) return tf.summary.merge([color_s, bw_s]) # TRAINING PROGRESS EVENTS def _on_training_start(self, sess): # Writers and savers self.summary_writer = tf.summary.FileWriter(FLAGS.logdir, sess.graph) self.saver = tf.train.Saver() self._build_embedding_saver(sess)
tensorflow.expand_dims
403
from tensorflow.python.ops import data_flow_ops loss = loss_function(logits, labels) params = self.variable_mgr.trainable_variables_on_device(device_num) l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in params]) weight_decay = FLAGS.weight_decay if weight_decay is not None and weight_decay != 0.: loss += weight_decay * l2_loss aggmeth = tf.AggregationMethod.DEFAULT grads = tf.gradients(loss, params, aggregation_method=aggmeth) if FLAGS.staged_vars: grad_dtypes = [grad.dtype for grad in grads] grad_shapes = [grad.shape for grad in grads] grad_stage = data_flow_ops.StagingArea(grad_dtypes, grad_shapes) grad_stage_op = grad_stage.put(grads) # In general, this decouples the computation of the gradients and # the updates of the weights. # During the pipeline warm up, this runs enough training to produce # the first set of gradients. gpu_grad_stage_ops.append(grad_stage_op) grads = grad_stage.get() param_refs = self.variable_mgr.trainable_variables_on_device( device_num, writable=True) gradvars = list(zip(grads, param_refs)) return (loss, gradvars)
tensorflow.python.ops.data_flow_ops.StagingArea
404
import tensorflow as tf graph = tf.Graph() graph_def = tf.compat.v1.GraphDef() import os file_ext = os.path.splitext(model_file)[1] with open(model_file, "rb") as f: if file_ext == '.pbtxt': text_format.Merge(f.read(), graph_def) else: graph_def.ParseFromString(f.read()) with graph.as_default(): tf.import_graph_def(graph_def, name='') tf.io.write_graph(graph_def, '/tmp/', 'optimized_graph.pb',as_text=False) return graph if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--input_graph", default=None, help="graph/model to be executed") parser.add_argument("--data_location", default=None, help="full path to the validation data") parser.add_argument("--input_height", default=None,
tensorflow.import_graph_def
405
import tensorflow as tf geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2) tgt_small = tf.where(geq, tgt2, tgt1) pred_larg = tf.where(geq, pred1, pred2) pred_small = tf.where(geq, pred2, pred1) loss = tf.maximum(0.0, (tgt_larg - tgt_small) - (pred_larg - pred_small)) # loss = tf.maximum(0.0, tf.math.abs(tgt_larg - pred_larg) - tf.math.abs(tgt_small - pred_small))
tensorflow.where
406
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib # TODO(roumposg): Remove when deprecated monitors are removed. hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
tensorflow.contrib.learn.python.learn.monitors.replace_monitors_with_hooks
407
from tensorflow.python.ops import init_ops with ops.Graph().as_default(), ops.device("/device:GPU:0"): inputs = seq_length * [ array_ops.zeros([batch_size, num_units], dtypes.float32) ] initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127) cell = rnn_cell.LSTMCell( num_units=num_units, initializer=initializer, state_is_tuple=True) multi_cell = rnn_cell.MultiRNNCell(
tensorflow.python.ops.init_ops.random_uniform_initializer
408
import tensorflow as tf batch_size = tf.shape(observations_ph.get())[0] random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64) chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions) update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps)) _act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph], outputs=output_actions, givens={update_eps_ph: -1.0, stochastic_ph: True}, updates=[update_eps_expr]) def act(ob, stochastic=True, update_eps=-1):
tensorflow.cond
409
import tensorflow as tf for layer in range(num_layers): input_size_ = input_size if layer == 0 else 2 * num_units gru_fw = tf.contrib.cudnn_rnn.CudnnGRU(1, num_units) gru_bw = tf.contrib.cudnn_rnn.CudnnGRU(1, num_units) init_fw = tf.tile(tf.Variable( tf.zeros([1, 1, num_units])), [1, batch_size, 1])
tensorflow.contrib.cudnn_rnn.CudnnGRU
410
from tensorflow.contrib.layers.python.layers import initializers out = layers.flatten(out) with tf.variable_scope("action_value"): out = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu) out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None) return out def simple_model(img_in, num_actions, scope, reuse=False, num_filters=64): with tf.variable_scope(scope, reuse=reuse): out = img_in gauss_initializer = initializers.xavier_initializer(uniform=False) # stddev = 1/n with tf.variable_scope("convnet"): out = layers.convolution2d( out, num_outputs=num_filters, kernel_size=8, stride=4, activation_fn=tf.nn.relu, weights_initializer=gauss_initializer, trainable=False) out = layers.flatten(out) with tf.variable_scope("action_value"): out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None) return out
tensorflow.contrib.layers.python.layers.initializers.xavier_initializer
411
from tensorflow.contrib import layers if n_classes < 2: raise ValueError("n_classes should be greater than 1. Given: {}".format( n_classes)) target_column = layers.multi_class_target( n_classes=n_classes, weight_column_name=weight_column_name) super(DNNLinearCombinedClassifier, self).__init__(
tensorflow.contrib.layers.multi_class_target
412
import tensorflow as tf ) num_episodes = tf.minimum(x=num_episodes, y=self.episode_count) assignment = tf.assign( ref=self.episode_indices[:self.episode_count - num_episodes], value=self.episode_indices[num_episodes: self.episode_count] ) # Decrement episode count. with tf.control_dependencies(control_inputs=(assignment,)): assignment = tf.assign_sub(ref=self.episode_count, value=num_episodes) # Assign new observations. with tf.control_dependencies(control_inputs=(assignment,)): assignments = list() for name in sorted(states): assignments.append(tf.scatter_update( ref=self.states_memory[name], indices=indices, updates=states[name] )) for name in sorted(internals): assignments.append(tf.scatter_update( ref=self.internals_memory[name], indices=indices, updates=internals[name] )) for name in sorted(actions): assignments.append(tf.scatter_update( ref=self.actions_memory[name], indices=indices,
tensorflow.scatter_update
413
import tensorflow as tf def softmax_loss(self, antecedent_scores, antecedent_labels): gold_scores = antecedent_scores + tf.log(tf.to_float(antecedent_labels)) # [k, max_ant + 1] marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1]) # [k] log_norm = tf.reduce_logsumexp(antecedent_scores, [1]) # [k] return log_norm - marginalized_gold_scores # [k]
tensorflow.reduce_logsumexp
414
import tensorflow as tf if cfgs.IMAGE_PYRAMID: shortside_len_list = tf.constant(cfgs.IMG_SHORT_SIDE_LEN) shortside_len = tf.random_shuffle(shortside_len_list)[0]
tensorflow.random_shuffle
415
import tensorflow.contrib.graph_editor as ge # checkpoint all expensive ops to maximize running speed checkpoints = ge.filter_ts_from_regex(fwd_ops, 'conv2d|Conv|MatMul')
tensorflow.contrib.graph_editor.filter_ts_from_regex
416
import tensorflow as tf # tgt1 = tf.slice(batch1, [0, 1], [num_sam, 1]) # tgt2 = tf.slice(batch2, [0, 1], [num_sam, 1]) # loss = compute_contra_loss(pred1, pred2, tgt1, tgt2) # print(loss) # return loss i = tf.constant(0) loss = tf.constant(0.) final_loss = tf.while_loop(lambda l, i: i < resample, sample_compute, [loss, i])[0] # final_loss = tf.scan(sample_compute, tf.range(resample), loss)[-1] # final_loss = tf.map_fn(fn=lambda inp: sample_compute(inp), elems= tf.range(resample), dtype=tf.float32, parallel_iterations=1) # print('final', final_loss) # final_loss = loss avg_loss = tf.reduce_mean(final_loss) / divider # p = tf.print('cur_loss', [final_loss, avg_loss]) # with tf.control_dependencies([p]):
tensorflow.while_loop
417
import tensorflow as tf # Build a simple graph. v0 = tf.Variable(0.0) inc = v0.assign_add(1.0) save = tf.train.Saver({"v0": v0}) # Record a short training history. tf.initialize_all_variables().run() save.save(sess, filepath, global_step=0) inc.eval() save.save(sess, filepath, global_step=1) inc.eval() save.save(sess, filepath, global_step=2)
tensorflow.initialize_all_variables
418
from tensorflow.python.ops import math_ops math_ops.logical_and(label_is_pos, pred_is_pos)) is_false_negative = math_ops.to_float( math_ops.logical_and(label_is_pos, pred_is_neg)) is_false_positive = math_ops.to_float( math_ops.logical_and(label_is_neg, pred_is_pos)) is_true_negative = math_ops.to_float( math_ops.logical_and(label_is_neg, pred_is_neg))
tensorflow.python.ops.math_ops.logical_and
419
import tensorflow as tf update_ops = tf.group(*self.update_ops) self.training_op = tf.group(update_ops, optimizer_step) def set_check_ops(self): self._check_ops = 1 # TODO argo2 This is not working anymore with the new session #with self.sess.graph.as_default(): self._numerics_ops = tf.add_check_numerics_ops() def release(self): super().release() self.sess.close() tf.reset_default_graph() def set_summaries(self): """This function sets summaries and summaryFileWriters, it needs to be invoked before training to keep track of the summaries.
tensorflow.add_check_numerics_ops
420
from tensorflow.python.ops import gen_resource_variable_ops def _read_variable_op(self): if _enclosing_tpu_context() is None: return self._primary_var.read_value() v = gen_resource_variable_ops.read_variable_op(self.handle, self._dtype) return v def read_value(self): return self._read_variable_op() def assign(self, value, use_locking=None, name=None, read_value=False): del use_locking with _handle_graph(self.handle), self._assign_dependencies(): value_tensor = ops.convert_to_tensor(value, dtype=self.dtype) assign_op = gen_resource_variable_ops.assign_variable_op( self.handle, value_tensor, name=name) if read_value: return self._read_variable_op() return assign_op def assign_add(self, delta, use_locking=None, name=None, read_value=True): del use_locking with _handle_graph(self.handle), self._assign_dependencies(): assign_add_op = gen_resource_variable_ops.assign_add_variable_op( self.handle, ops.convert_to_tensor(delta, dtype=self.dtype), name=name)
tensorflow.python.ops.gen_resource_variable_ops.assign_variable_op
421
from tensorflow.python.ops import random_ops raise ValueError("keep_prob must be a scalar tensor or a float in the " "range (0, 1], got %g" % keep_prob) keep_prob = ops.convert_to_tensor( keep_prob, dtype=x.dtype, name="keep_prob") keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar()) noise_shape = noise_shape or array_ops.shape(x) # uniform [keep_prob, 1.0 + keep_prob) random_tensor = keep_prob random_tensor += random_ops.random_uniform( noise_shape, seed=seed, dtype=x.dtype) # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob) binary_tensor = math_ops.floor(random_tensor) ret = x * math_ops.inv(keep_prob) * binary_tensor ret.set_shape(x.get_shape()) return ret
tensorflow.python.ops.random_ops.random_uniform
422
from tensorflow.python.util import compat hasher = hashlib.sha1() def _hash_func_def(): """Hash the function definition agnostic to node/map ordering.""" def update_num(n): hasher.update(compat.as_bytes("%x" % n)) def update_str(s): update_num(len(s)) hasher.update(compat.as_bytes(s))
tensorflow.python.util.compat.as_bytes
423
import tensorflow as tf reshaped_img[0]))) # For unit testing, use `run_eval_loop=False`. if not run_eval_loop: return tf.contrib.training.evaluate_repeatedly( FLAGS.checkpoint_dir, hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir), tf.contrib.training.StopAfterNEvalsHook(1)], eval_ops=image_write_ops, max_number_of_evaluations=FLAGS.max_number_of_evaluations) def _get_generator_inputs(num_images_per_class, num_classes, noise_dims): # Since we want a grid of numbers for the conditional generator, manually
tensorflow.contrib.training.StopAfterNEvalsHook
424
from tensorflow.contrib.learn.python.learn.preprocessing.text import CategoricalVocabulary if vocabulary: self.vocabulary_ = vocabulary else: self.vocabulary_ = CategoricalVocabulary(support_reverse=True) if tokenizer_fn: self._tokenizer = tokenizer_fn
tensorflow.contrib.learn.python.learn.preprocessing.text.CategoricalVocabulary
425
from tensorflow.python.ops import math_ops predictions.get_shape().assert_is_compatible_with(labels.get_shape()) cov, update_cov = streaming_covariance( predictions, labels, weights=weights, name='covariance') var_predictions, update_var_predictions = streaming_covariance( predictions, predictions, weights=weights, name='variance_predictions') var_labels, update_var_labels = streaming_covariance( labels, labels, weights=weights, name='variance_labels') pearson_r = _safe_div( cov, math_ops.mul(math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)), 'pearson_r') with ops.control_dependencies( [update_cov, update_var_predictions, update_var_labels]): update_op = _safe_div(update_cov, math_ops.mul( math_ops.sqrt(update_var_predictions), math_ops.sqrt(update_var_labels)), 'update_op') if metrics_collections: ops.add_to_collections(metrics_collections, pearson_r)
tensorflow.python.ops.math_ops.sqrt
426
import tensorflow as tf @private_method def _PS(self, I, r): X = tf.transpose(I, [2, 1, 0]) # (r, w, b) X = tf.batch_to_space_nd(X, [r], [[0, 0]]) # (1, r*w, b) X = tf.transpose(X, [2, 1, 0]) return X
tensorflow.batch_to_space_nd
427
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils metrics) estimator_test_utils.assert_in_range( 0.9, 1.0, 'precision/positive_threshold_0.500000_mean', metrics) estimator_test_utils.assert_in_range( 0.9, 1.0, 'recall/positive_threshold_0.500000_mean', metrics) self._assertCommonMetrics(metrics) def _assertCommonMetrics(self, metrics): estimator_test_utils.assert_in_range(_ITERS, _ITERS + 5, 'global_step', metrics) estimator_test_utils.assert_in_range(0.9, 1.0, 'accuracy', metrics) estimator_test_utils.assert_in_range(0.0, 0.2, 'loss', metrics) self.report_benchmark( iters=metrics['global_step'], extras={k: v for k, v in metrics.items() if k in _METRIC_KEYS}) def benchmarkMatrixData(self): iris = test_data.prepare_iris_data_for_logistic_regression() cont_feature = feature_column.real_valued_column('feature', dimension=4)
tensorflow.contrib.learn.python.learn.estimators.estimator_test_utils.assert_in_range
428
from tensorflow.contrib.layers.python.layers import feature_column_ops column_types = layers.create_feature_spec_for_parsing(( self._get_linear_feature_columns() or []) + ( self._get_dnn_feature_columns() or [])) features = parsing_ops.parse_example(examples_batch, column_types) return features def _get_linear_feature_columns(self): if not self._linear_feature_columns: return None feature_column_ops.check_feature_columns(self._linear_feature_columns) return sorted(set(self._linear_feature_columns), key=lambda x: x.key) def _get_dnn_feature_columns(self): if not self._dnn_feature_columns: return None feature_column_ops.check_feature_columns(self._dnn_feature_columns) return sorted(set(self._dnn_feature_columns), key=lambda x: x.key) def _dnn_logits(self, features, is_training): return self._dnn_model.build_model( features, self._dnn_feature_columns, is_training) def _linear_logits(self, features, is_training): return self._linear_model.build_model( features, self._linear_feature_columns, is_training) def _centered_bias(self): centered_bias = variables.Variable( array_ops.zeros([self._target_column.num_label_columns]), collections=[self._centered_bias_weight_collection,
tensorflow.contrib.layers.python.layers.feature_column_ops.check_feature_columns
429
import tensorflow as tf for name, op in ops.iteritems(): tf.add_to_collection(name, op) self._initial_state_name = util.with_prefix(self._name, "initial") self._final_state_name = util.with_prefix(self._name, "final") util.export_state_tuples(self._initial_state, self._initial_state_name) util.export_state_tuples(self._final_state, self._final_state_name) def import_ops(self): """Imports ops from collections.""" if self._is_training: self._train_op = tf.get_collection_ref("train_op")[0] self._lr = tf.get_collection_ref("lr")[0] self._new_lr = tf.get_collection_ref("new_lr")[0] self._lr_update = tf.get_collection_ref("lr_update")[0] rnn_params = tf.get_collection_ref("rnn_params") if self._cell and rnn_params: params_saveable = tf.contrib.cudnn_rnn.RNNParamsSaveable( self._cell, self._cell.params_to_canonical, self._cell.canonical_to_params,
tensorflow.get_collection_ref
430
from tensorflow.contrib.learn.python.learn.graph_actions import train with ops.Graph().as_default() as g, g.device(device_fn): random_seed.set_random_seed(self._config.tf_random_seed) global_step = contrib_framework.create_global_step(g) features, targets = input_fn() self._check_inputs(features, targets) train_op, loss_op = self._get_train_ops(features, targets) return train( graph=g, output_dir=self._model_dir, train_op=train_op, loss_op=loss_op, global_step_tensor=global_step, log_every_steps=log_every_steps,
tensorflow.contrib.learn.python.learn.graph_actions.train
431
import tensorflow as tf mapping_strings, name=name ) word_strings = reverse_vocab_tags.lookup(tf.to_int64(word_ids)) return word_strings def loss_layer(self, preds, ground_true, nwords, crf_params): with tf.name_scope("CRF_log_likelihood"): log_likelihood, _ = tf.contrib.crf.crf_log_likelihood( preds, ground_true, nwords, crf_params ) loss = tf.reduce_mean(-log_likelihood) # regularizer = tf.contrib.layers.l2_regularizer(0.001) # reg = regularizer(embedding_variable) # loss += reg
tensorflow.contrib.crf.crf_log_likelihood
432
import tensorflow as tf def literal(x): return 1.0 if x == 0.0 else 0.0 x = tf.constant(0.0, tf.float64) y, = tf.py_func(literal, [x], [tf.float64]) self.assertAllClose(y.eval(), 1.0) def testStrings(self): def read_fixed_length_numpy_strings(): return np.array([b" there"]) def read_and_return_strings(x, y): return x + y with self.test_session(): x = tf.constant([b"hello", b"hi"], tf.string) y, = tf.py_func(read_fixed_length_numpy_strings, [], [tf.string]) z, = tf.py_func(read_and_return_strings, [x, y], [tf.string]) self.assertListEqual(list(z.eval()), [b"hello there", b"hi there"]) def testLarge(self): with self.test_session() as sess: x = tf.zeros([1000000], dtype=np.float32) y = tf.py_func(lambda x: x + 1, [x], [tf.float32]) z = tf.py_func(lambda x: x * 2, [x], [tf.float32]) for _ in xrange(100): sess.run([y[0].op, z[0].op]) def testNoInput(self): with self.test_session():
tensorflow.constant
433
import tensorflow as tf # x is shaped [batch_size,time_steps,num_inputs] if is_dynamic_rnn: lstm_input = tf.transpose(x, perm=[1, 0, 2]) outputs, _ = tf.lite.experimental.nn.dynamic_rnn( lstm_layer, lstm_input, dtype="float32") outputs = tf.unstack(outputs, axis=0) else: lstm_input = tf.unstack(x, self.time_steps, 1) outputs, _ = tf.nn.static_rnn(lstm_layer, lstm_input, dtype="float32") # Compute logits by multiplying outputs[-1] of shape [batch_size,num_units] # by the softmax layer's out_weight of shape [num_units,n_classes] # plus out_bias prediction = tf.matmul(outputs[-1], out_weights) + out_bias output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
tensorflow.unstack
434
import tensorflow as tf m = tf.reduce_mean(x, 0, True) z = tf.expand_dims(x - m, 2) corr = tf.reduce_mean(tf.matmul(z, tf.transpose(z, perm=[0, 2, 1])), 0) corr_frob_sqr = tf.reduce_sum(tf.square(corr)) corr_diag_sqr = tf.reduce_sum(tf.square(tf.diag_part(corr))) loss = 0.5 * (corr_frob_sqr - corr_diag_sqr) return loss
tensorflow.diag_part
435
import tensorflow.contrib.slim as slim y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height # Won't be backpropagated to rois anyway, but to save time bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1)) # 'roi_pooling_size', 7 pre_pool_size = cfg.FLAGS.roi_pooling_size * 2 # 把rois对于的特征图上的部分crop出来,然后resize打破14*14的大小 crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [pre_pool_size, pre_pool_size], name="crops") return slim.max_pool2d(crops, [2, 2], padding='SAME') def _dropout_layer(self, bottom, name, ratio=0.5): return tf.nn.dropout(bottom, ratio, name=name) def _anchor_target_layer(self, rpn_cls_score, name): with tf.variable_scope(name): # 这里的index是对于所有anchor而言
tensorflow.contrib.slim.max_pool2d
436
import tensorflow as tf update_target_expr.append(var_target.assign(var)) update_target_expr = tf.group(*update_target_expr) # compute optimization op (potentially with gradient clipping) gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars) if grad_norm_clipping is not None: for i, (grad, var) in enumerate(gradients): if grad is not None: gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var) with tf.variable_scope("input_info", reuse=False): tf.summary.scalar('rewards', tf.reduce_mean(rew_t_ph)) tf.summary.scalar('importance_weights', tf.reduce_mean(importance_weights_ph)) if full_tensorboard_log: tf.summary.histogram('rewards', rew_t_ph)
tensorflow.clip_by_norm
437
from tensorflow.python.training import training as train "Adagrad": train.AdagradOptimizer, "Adam": train.AdamOptimizer, "Ftrl": train.FtrlOptimizer, "Momentum": lambda learning_rate: train.MomentumOptimizer(learning_rate, momentum=0.9), # pylint: disable=line-too-long "RMSProp": train.RMSPropOptimizer, "SGD": train.GradientDescentOptimizer,
tensorflow.python.training.training.MomentumOptimizer
438
from tensorflow.python.ops import nn_ops coverage_features = tf.expand_dims(coverage, axis=-1) * w_c # [batch_size, passage_len, attention_vec_size] all_features += coverage_features e = tf.reduce_sum(v * tf.tanh(all_features), axis=-1) # [batch_size, passage_len] attn_dist = nn_ops.softmax(e) # [batch_size, passage_len] attn_dist *= passage_mask
tensorflow.python.ops.nn_ops.softmax
439
from tensorflow.contrib.util import make_tensor_proto stub = prediction_service_pb2_grpc.PredictionServiceStub(channel) input_data = np.array([[2061, 318, 428, 30]], dtype='int32') # Boiler-plate request = predict_pb2.PredictRequest() # Set request objects using the tf-serving `CopyFrom` setter method request.model_spec.name = '0' request.model_spec.signature_name = 'serving_default' # This is correct (default constant). request.inputs['input'].CopyFrom(make_tensor_proto(input_data, shape=input_data.shape)) # Boiler-Plate response = stub.Predict(request, timeout) result = response.outputs['output'] print(tf.make_ndarray(result))
tensorflow.contrib.util.make_tensor_proto
440
import tensorflow as tf Returns: the triplet loss. """ with tf.name_scope(name): pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1) neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1) basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha) loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0) return loss def decov_loss(xs, name='decov_loss'):
tensorflow.subtract
441
from tensorflow.python.ops import math_ops predictions, labels = tensor_util.remove_squeezable_dimensions( predictions, labels) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) weights = _mask_weights(ignore_mask, weights) true_positives, true_positives_update_op = _streaming_true_positives( predictions, labels, weights, metrics_collections=None, updates_collections=None, name=None) false_negatives, false_negatives_update_op = _streaming_false_negatives( predictions, labels, weights, metrics_collections=None, updates_collections=None, name=None) def compute_recall(true_positives, false_negatives, name): return math_ops.select( math_ops.greater(true_positives + false_negatives, 0), math_ops.div(true_positives, true_positives + false_negatives), 0, name) recall = compute_recall(true_positives, false_negatives, 'value') with ops.control_dependencies([true_positives_update_op, false_negatives_update_op]): update_op = compute_recall(true_positives, false_negatives, 'update_op') if metrics_collections: ops.add_to_collections(metrics_collections, recall) if updates_collections:
tensorflow.python.ops.math_ops.greater
442
from tensorflow.python.ops import math_ops indices_at_minval = math_ops.to_int64(indices_at_minval) indices_at_minval = math_ops.cumsum(indices_at_minval)
tensorflow.python.ops.math_ops.cumsum
443
from tensorflow.python.ops import check_ops sample_ndims = ndims - self.batch_ndims - self.event_ndims if self.validate_args: sample_ndims = control_flow_ops.with_dependencies( [check_ops.assert_non_negative(sample_ndims)], sample_ndims) return sample_ndims
tensorflow.python.ops.check_ops.assert_non_negative
444
from tensorflow.examples.tutorials.mnist import input_data # Number of steps to train model. TRAIN_STEPS = 1 CONFIG = tf.ConfigProto(device_count={"GPU": 0}) class UnidirectionalSequenceLstmTest(test_util.TensorFlowTestCase): def setUp(self): tf.reset_default_graph() # Import MNIST dataset self.mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) # Define constants # Unrolled through 28 time steps self.time_steps = 28 # Rows of 28 pixels self.n_input = 28 # Learning rate for Adam optimizer self.learning_rate = 0.001 # MNIST is meant to be classified in 10 classes(0-9). self.n_classes = 10 # Batch size
tensorflow.examples.tutorials.mnist.input_data.read_data_sets
445
from tensorflow.python.framework import ops ops.GraphKeys.TRAINABLE_VARIABLES) gradients = gradients_impl.gradients([outputs, final_state], trainable_variables) training_op = control_flow_ops.group(*gradients) self._BenchmarkOp(training_op, "tf_rnn_lstm %s %s" % (config_name, self._GetConfigDesc(config))) def benchmarkTfRNNLSTMBlockCellTraining(self): test_configs = self._GetTestConfig() for config_name, config in test_configs.items(): num_layers = config["num_layers"] num_units = config["num_units"] batch_size = config["batch_size"] seq_length = config["seq_length"] with ops.Graph().as_default(), ops.device("/device:GPU:0"): inputs = seq_length * [ array_ops.zeros([batch_size, num_units], dtypes.float32) ] cell = lambda: lstm_ops.LSTMBlockCell(num_units=num_units) # pylint: disable=cell-var-from-loop multi_cell = rnn_cell.MultiRNNCell( [cell() for _ in range(num_layers)]) outputs, final_state = core_rnn.static_rnn( multi_cell, inputs, dtype=dtypes.float32) trainable_variables = ops.get_collection( ops.GraphKeys.TRAINABLE_VARIABLES) gradients = gradients_impl.gradients([outputs, final_state], trainable_variables) training_op = control_flow_ops.group(*gradients)
tensorflow.python.framework.ops.Graph
446
import tensorflow as tf if not run_eval_loop: return tf.contrib.training.evaluate_repeatedly( FLAGS.checkpoint_dir, hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir), tf.contrib.training.StopAfterNEvalsHook(1)], eval_ops=image_write_ops,
tensorflow.contrib.training.SummaryAtEndHook
447
from tensorflow.contrib import metrics as metrics_lib # TODO(zakaria): support weights. def _targets_streaming_mean(unused_predictions, targets): return metrics_lib.streaming_mean(targets)
tensorflow.contrib.metrics.streaming_mean
448
from tensorflow.python.ops import nn def logits_to_predictions(self, logits, proba=False): if self.num_label_columns == 1: logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1) if proba: return nn.softmax(logits) else: return math_ops.argmax(logits, 1) def _default_eval_metrics(self): if self._num_label_columns == 1:
tensorflow.python.ops.nn.softmax
449
from tensorflow.contrib.framework.python.ops import variables as contrib_variables return True class RunHookAdapterForMonitors(session_run_hook.SessionRunHook): """Wraps monitors into a SessionRunHook.""" def __init__(self, monitors): self._monitors = monitors def begin(self): self._last_step = None self._global_step_tensor = contrib_variables.get_global_step() for m in self._monitors: m.begin(max_steps=None) def before_run(self, run_context): if self._last_step is None: self._last_step = run_context.session.run(self._global_step_tensor) + 1 request = {self._global_step_tensor: self._global_step_tensor} monitor_fetches = [] for m in self._monitors:
tensorflow.contrib.framework.python.ops.variables.get_global_step
450
import tensorflow as tf # Test exact match without wildcards. for f in files: self.assertEqual(tf.matching_files(f.name).eval(), tf.compat.as_bytes(f.name)) # We will look for files matching "ABxDEF.GH*" where "x" is some wildcard. pos = files[0].name.find(cases[0]) pattern = files[0].name[:pos] + 'AB%sDEF.GH*' self.assertEqual(set(tf.matching_files(pattern % 'z').eval()), self._subset(files, [1])) self.assertEqual(set(tf.matching_files(pattern % '?').eval()), self._subset(files, [0, 1, 3, 4])) self.assertEqual(set(tf.matching_files(pattern % '*').eval()), self._subset(files, [0, 1, 2, 3, 4, 5])) self.assertEqual(set(tf.matching_files(pattern % '[cxz]').eval()), self._subset(files, [0, 1])) self.assertEqual(set(tf.matching_files(pattern % '[0-9]').eval()), self._subset(files, [3, 4])) if __name__ == '__main__': tf.test.main()
tensorflow.matching_files
451
import tensorflow as tf self._moving_mean = tf.get_variable( "moving_mean", shape=self._mean_shape, collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES, tf.GraphKeys.GLOBAL_VARIABLES], initializer=tf.zeros_initializer(), trainable=False) self._moving_variance = tf.get_variable( "moving_variance", shape=self._mean_shape, collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES, tf.GraphKeys.GLOBAL_VARIABLES], initializer=tf.ones_initializer(), trainable=False) def build_batch_stats(): """Builds the batch statistics calculation ops.""" # We use the moving mean as an estimate of the mean in order to perform # a more numerically stable calculation of the batch mean. # Copy for better stability. shift = tf.add(self._moving_mean, 0) counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics( input_batch, reduction_indices,
tensorflow.ones_initializer
452
from tensorflow.python.ops import math_ops # whether we should use the first or last index in case of ties. min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))
tensorflow.python.ops.math_ops.abs
453
import tensorflow as tf pred2 = tf.slice(batch2, [0, 0], [num_sam, 1]) tgt2 = tf.slice(batch2, [0, 1], [num_sam, 1])
tensorflow.slice
454
from tensorflow.python.ops import array_ops count = _create_local('count', shape=[]) if weights is not None: weights = math_ops.to_float(weights) values = math_ops.mul(values, weights) num_values = math_ops.reduce_sum(_broadcast_weights(weights, values)) else: num_values = math_ops.to_float(array_ops.size(values)) total_compute_op = state_ops.assign_add(total, math_ops.reduce_sum(values)) count_compute_op = state_ops.assign_add(count, num_values) mean = _safe_div(total, count, 'value') with ops.control_dependencies([total_compute_op, count_compute_op]):
tensorflow.python.ops.array_ops.size
455
from tensorflow.python.ops import random_ops return constant_op.constant([], dtype=dtypes.int32) def _get_event_shape(self): return tensor_shape.scalar() def _sample_n(self, n, seed=None): """See the documentation for tf.random_gamma for more details.""" return 1. / random_ops.random_gamma([n], self.alpha, beta=self.beta, dtype=self.dtype, seed=seed) def _log_prob(self, x): x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if self.validate_args else [], x) return (self.alpha * math_ops.log(self.beta) -
tensorflow.python.ops.random_ops.random_gamma
456
import tensorflow as tf A tuple containing the observation placeholder and the processed observation placeholder respectively. """ eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0)) policy = q_func(sess, ob_space, ac_space, 1, 1, None, layers=layers) obs_phs = (policy.obs_ph, policy.processed_obs) deterministic_actions = tf.argmax(policy.q_values, axis=1) ######################### ### KEVIN UPDATE ######## ### GIMME DAT PRINTS #### #########################
tensorflow.argmax
457
import tensorflow as tf out[0] = tf.Print(out[0], [out[0], self.W_rec, Omega], "omega grads")
tensorflow.Print
458
from tensorflow.python.ops import math_ops "xw_plus_b_v1" is used. Returns: A 2-D Tensor computing matmul(x, weights) + biases. Dimensions typically: batch, out_units. """ with ops.op_scope([x, weights, biases], name, "xw_plus_b_v1") as name: x = ops.convert_to_tensor(x, name="x") weights = ops.convert_to_tensor(weights, name="weights") biases = ops.convert_to_tensor(biases, name="biases") mm = math_ops.matmul(x, weights) return bias_add_v1(mm, biases, name=name) # pylint: disable=invalid-name def dropout(x, keep_prob, noise_shape=None, seed=None, name=None): """Computes dropout. With probability `keep_prob`, outputs the input element scaled up by `1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected
tensorflow.python.ops.math_ops.matmul
459
import tensorflow as tf epsilon = 1e-5 mean, var = tf.nn.moments(x, [1, 2], keep_dims=True) scale = tf.get_variable('scale',[x.get_shape()[-1]], initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02)) offset = tf.get_variable('offset',[x.get_shape()[-1]],initializer=tf.constant_initializer(0.0)) out = scale*tf.div(x-mean, tf.sqrt(var+epsilon)) + offset
tensorflow.truncated_normal_initializer
460
from tensorflow.python.ops import math_ops return math_ops.log(self._cdf(x)) def _cdf(self, x): x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if self.validate_args else [], x) # Note that igammac returns the upper regularized incomplete gamma # function Q(a, x), which is what we want for the CDF. return math_ops.igammac(self.alpha, self.beta / x) @distribution_util.AppendDocstring( """This is defined to be ``` entropy = alpha - log(beta) + log(Gamma(alpha))
tensorflow.python.ops.math_ops.igammac
461
import tensorflow as tf Z2 = tf.matmul(Z, A[:,:,n_basis//2:,:])/tf.sqrt(n_basis*.5) # Compute u_{h+1} and v_{h+1} U, V = tf.cos(Z1)+tf.cos(Z2), tf.sin(Z1)+tf.sin(Z2) Z = tf.concat([U, V], 3)/tf.sqrt(n_out*1.)
tensorflow.sin
462
import tensorflow as tf raw_output = tf.squeeze(tf.batch_matmul(head_rel_mult, tail_embed_col)) self.output, self.loss = self._create_output_and_loss(raw_output) # Optimization self.train_step = self.opt.minimize(self.loss) if self.maxnorm is not None: # Post-processing to limit embedding vars to L2 ball rel_maxnorm = self.maxnorm * self.rel_maxnorm_mult unique_ent_indices = tf.unique(tf.concat(0, [self.head_input, self.tail_input]))[0] unique_rel_indices = tf.unique(self.rel_input)[0] entity_constraint = self._norm_constraint_op(self.entity_embedding_vars, unique_ent_indices, self.maxnorm) rel_constraint = self._norm_constraint_op(self.rel_embedding_vars, unique_rel_indices, rel_maxnorm) self.post_step = [entity_constraint, rel_constraint]
tensorflow.unique
463
import tensorflow as tf # ------------------------------------------------ # Random initialization Load weights from weights path # for Initial state, Weight matrices, and bias weights # ------------------------------------------------ if self.load_weights_path is None: # random initializations init_state_initializer = tf.random_normal_initializer(mean=0.1, stddev=0.01) W_in_initializer = tf.constant_initializer( 0.1 * np.random.uniform(-1, 1, size=(self.N_rec, self.N_in))) W_rec_initializer = tf.constant_initializer(self.initial_W()) W_out_initializer = tf.constant_initializer( 0.1 * np.random.uniform(-1, 1, size=(self.N_out, self.N_rec))) b_rec_initializer = tf.constant_initializer(0.0) b_out_initializer = tf.constant_initializer(0.0) else: print("Loading Weights") weights = np.load(self.load_weights_path) init_state_initializer = tf.constant_initializer(weights['init_state']) W_in_initializer = tf.constant_initializer(weights['W_in']) W_rec_initializer = tf.constant_initializer(weights['W_rec']) W_out_initializer = tf.constant_initializer(weights['W_out']) b_rec_initializer = tf.constant_initializer(weights['b_rec']) b_out_initializer = tf.constant_initializer(weights['b_out']) self.input_connectivity_mask = weights['input_Connectivity'] self.recurrent_connectivity_mask = weights['rec_Connectivity'] self.output_connectivity_mask = weights['output_Connectivity']
tensorflow.constant_initializer
464
from tensorflow.python.ops import math_ops Returns: Loss tensor. """ target = target[self.name] if isinstance(target, dict) else target loss_unweighted = self._loss_fn(logits, target) weight_tensor = self.get_weight_tensor(features) if weight_tensor is None: return math_ops.reduce_mean(loss_unweighted, name="loss") else: loss_unweighted = array_ops.reshape(loss_unweighted, shape=(-1,)) loss_weighted = math_ops.mul( loss_unweighted, array_ops.reshape(weight_tensor, shape=(-1,))) return math_ops.div( math_ops.reduce_sum(loss_weighted), math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
tensorflow.python.ops.math_ops.reduce_mean
465
from tensorflow.python.training import training_util def _model_builder(self): """Creates a model function.""" def _model_fn(features, labels, mode, config): """Model function.""" assert labels is None, labels (all_scores, model_predictions, losses, training_op, init_op, is_initialized) = gmm_ops.gmm(self._parse_tensor_or_dict(features), self._training_initial_clusters, self._num_clusters, self._random_seed, self._covariance_type, self._params) incr_step = state_ops.assign_add(training_util.get_global_step(), 1) loss = math_ops.reduce_sum(losses) training_op = with_dependencies([training_op, incr_step], loss) training_hooks = [_InitializeClustersHook( init_op, is_initialized, config.is_chief)] predictions = { GMM.ALL_SCORES: all_scores[0], GMM.ASSIGNMENTS: model_predictions[0][0], } eval_metric_ops = { GMM.SCORES: _streaming_sum(loss), } return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions, eval_metric_ops=eval_metric_ops, loss=loss, train_op=training_op,
tensorflow.python.training.training_util.get_global_step
466
from tensorflow.python.framework import tensor_shape """ if tensor_dtype is None: if not inputs or not isinstance(inputs, (list, tuple)): raise ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape") inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs) if not all(isinstance(x, ops.Tensor) for x in inputs): raise ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape") if not all(x.dtype == inputs[0].dtype for x in inputs): raise ValueError("inputs must be a list of at least one Tensor with the " "same dtype and shape") tensor_dtype = inputs[0].dtype if shape is not None: shape = tensor_shape.as_shape(shape) else: shape = tensor_shape.unknown_shape() for input_tensor in inputs: if isinstance(input_tensor, ops.Tensor): shape = shape.merge_with(input_tensor.get_shape()) if not shape.is_fully_defined(): # TODO(pbar): Make a version of assign_add that accepts an uninitialized # lvalue, and takes its shape from that? This would allow accumulate_n to # work in all situations that add_n currently works. raise ValueError("Cannot infer the shape of the accumulator for " "accumulate_n. Pass the shape argument, or set the shape " "of at least one of the inputs.") with ops.op_scope(inputs, name, "AccumulateN") as name:
tensorflow.python.framework.tensor_shape.as_shape
467
import tensorflow as tf return self._build_rnn_graph_cudnn(inputs, config, is_training) else: return self._build_rnn_graph_lstm(inputs, config, is_training) def _build_rnn_graph_cudnn(self, inputs, config, is_training): """Build the inference graph using CUDNN cell.""" inputs = tf.transpose(inputs, [1, 0, 2]) self._cell = tf.contrib.cudnn_rnn.CudnnLSTM( num_layers=config.num_layers, num_units=config.hidden_size, input_size=config.hidden_size, dropout=1 - config.keep_prob if is_training else 0) params_size_t = self._cell.params_size() self._rnn_params = tf.get_variable(
tensorflow.contrib.cudnn_rnn.CudnnLSTM
468
import tensorflow as tf offset: (height, width) Returns: """ with tf.variable_scope('anchor_generator'): if offset is None: offset = [stride[0]/2, stride[1]/2] features_width = tf.cast(features_width, tf.int32) features_height = tf.cast(features_height, tf.int32) scales = tf.convert_to_tensor(scales, dtype=tf.float32) ratios = tf.convert_to_tensor(ratios, dtype=tf.float32) offset = tf.convert_to_tensor(offset, dtype=tf.float32) scales_grid, ratios_grid = tf.meshgrid(scales, ratios) scales_grid = tf.reshape(scales_grid, [-1, 1]) ratios_grid = tf.reshape(ratios_grid, [-1, 1]) ratio_sqrts = tf.sqrt(ratios_grid)
tensorflow.convert_to_tensor
469
import tensorflow as tf print(sess.run(tf.self_adjoint_eig(D))) print(sess.run(tf.div(13, 4))) print(sess.run(tf.truediv(13, 4))) print(sess.run(tf.floordiv(13, 4))) print(sess.run(tf.mod(13.2, 4))) print(sess.run(tf.cross([1, 0, 0], [0, 1, 0]))) print(sess.run(tf.square([1, 2, 3]))) def custom_polynomial(local_tf, value): return local_tf.subtract(3 * local_tf.square(value), value) + 10
tensorflow.cross
470
from tensorflow.python.ops import nn `predictions`, or if `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. """ default_name = _at_k_name('recall', k, class_id=class_id) with ops.name_scope(name, default_name, (predictions, labels)) as scope: _, top_k_idx = nn.top_k(predictions, k) top_k_idx = math_ops.to_int64(top_k_idx) weights = _mask_weights(ignore_mask, weights) tp, tp_update = _streaming_sparse_true_positive_at_k( predictions_idx=top_k_idx, labels=labels, k=k, class_id=class_id, weights=weights)
tensorflow.python.ops.nn.top_k
471
from tensorflow.contrib.tensorboard.plugins import projector self.encode.get_shape().as_list()[1]] tsv_path = os.path.join(FLAGS.logdir, 'metadata.tsv') self.embedding_test_ph = tf.placeholder(tf.float32, embedding_shape, name='embedding') self.embedding_test = tf.Variable(tf.random_normal(embedding_shape), name='test_embedding', trainable=False) self.embedding_assign = self.embedding_test.assign(self.embedding_test_ph) self.embedding_saver = tf.train.Saver(var_list=[self.embedding_test]) config = projector.ProjectorConfig() embedding = config.embeddings.add() embedding.tensor_name = self.embedding_test.name embedding.sprite.image_path = './sprite.png' embedding.sprite.single_image_dim.extend([80, 80]) embedding.metadata_path = './metadata.tsv' projector.visualize_embeddings(self.summary_writer, config) sess.run(tf.variables_initializer([self.embedding_test], name='init_embeddings')) # build sprite image ut.images_to_sprite(self.test_set, path=os.path.join(FLAGS.logdir, 'sprite.png')) ut.generate_tsv(len(self.test_set), tsv_path) def _add_loss_summary(self, name, var, collection='train'): if var is not None: tf.summary.scalar(name, var, [collection]) tf.summary.scalar('log_' + name, tf.log(var), [collection]) def _restore_model(self, session): latest_checkpoint = self.get_latest_checkpoint()
tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings
472
import tensorflow as tf project=params.platform.gcp_project) tpu_grpc_url = tpu_cluster_resolver.get_master() tf.Session.reset(tpu_grpc_url)
tensorflow.Session.reset
473
from tensorflow.contrib.learn.python.learn.graph_actions import evaluate eval_dict = self._get_eval_ops(features, targets, metrics or self._get_default_metric_functions()) eval_results, _ = evaluate( graph=g,
tensorflow.contrib.learn.python.learn.graph_actions.evaluate
474
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature Expected to be overriden by sub-classes that require custom support. This implementation uses `model_fn` passed as parameter to constructor to build model. Args: features: `Tensor` or `dict` of `Tensor` objects. Returns: predictions: `Tensor` or `dict` of `Tensor` objects. """ targets = tensor_signature.create_placeholders_from_signatures( self._targets_info) predictions, _ = self._model_fn(features, targets, ModeKeys.INFER) return predictions def _get_default_metric_functions(self): """Method that provides default metric operations. Returns: a dictionary of metric operations.
tensorflow.contrib.learn.python.learn.estimators.tensor_signature.create_placeholders_from_signatures
475
from tensorflow.python.framework import ops else: used_mean, used_var = mean, var cur_mean, cur_var = used_mean, used_var # update variables if train: with tf.name_scope(name, "AssignMovingAvg", [mean, cur_mean, decay]): with ops.colocate_with(mean): new_mean = tf.assign_sub( mean, tf.check_numerics( decay * (mean - cur_mean), "NaN in moving mean.")) with tf.name_scope(name, "AssignMovingAvg", [var, cur_var, decay]): with ops.colocate_with(var): new_var = tf.assign_sub( var, tf.check_numerics(decay * (var - cur_var), "NaN in moving variance.")) with tf.name_scope(name, "IncrementTime", [step]): with ops.colocate_with(step): new_step = tf.assign_add(step, 1.) used_var += 0. * new_mean * new_var * new_step used_var += epsilon return used_mean, used_var
tensorflow.python.framework.ops.colocate_with
476
import tensorflow as tf sigma = tf.clip_by_value(sigma, 0.0, 1.0) norm_dist = tf.distributions.Normal(loc=mu * self.a_bound, scale=sigma) params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name) return norm_dist, params def build_cnet(self, state_in, name, reuse=False): reg = tf.contrib.layers.l2_regularizer(1e-3) with tf.variable_scope(name, reuse=reuse): layer_c1 = tf.layers.dense(state_in, 512, tf.nn.relu, kernel_regularizer=reg) layer_c2 = tf.layers.dense(layer_c1, 256, tf.nn.relu, kernel_regularizer=reg) vf = tf.layers.dense(layer_c2, 1, kernel_regularizer=reg) params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
tensorflow.contrib.layers.l2_regularizer
477
import tensorflow as tf the decoded items, a list of tensor. """ context, sequence = tf.parse_single_sequence_example( serialized_example, self._context_keys_to_features, self._sequence_keys_to_features) tokens_raw = sequence[self.tokens_feature_name] tokens = tf.string_split(tokens_raw, delimiter=self.delimiter).values # Optionally prepend a special token if self.prepend_token is not None: tokens = tf.concat([[self.prepend_token], tokens], 0) # Optionally append a special token
tensorflow.string_split
478
from tensorflow.python.ops import nn_ops tf.reset_default_graph() time.sleep(1) if dense: td = 0 with tf.device("/gpu:0"): conv = nn_ops.conv3d(d1, d2, strides, padding) with tf.Session(config=config) as sess: t22 = time.time() expected = sess.run(conv) t11 = time.time() for i in range(0, num_trials): sess.run(conv)
tensorflow.python.ops.nn_ops.conv3d
479
from tensorflow.python.util import deprecation to run exclusively on the elected chief worker. """ @deprecation.deprecated( "2016-12-05", "Monitors are deprecated. Please use tf.train.SessionRunHook.")
tensorflow.python.util.deprecation.deprecated
480
import tensorflow as tf b = tf.get_variable(name='bias', trainable=is_pretrain, shape=[out_channels], initializer=tf.contrib.layers.xavier_initializer()) x = tf.nn.conv3d(x, w, strides=strides, padding='SAME', data_format=data_format, name='conv3d') x = tf.nn.bias_add(x, b, name='bias_add')
tensorflow.contrib.layers.xavier_initializer
481
import tensorflow as tf supports.append(calculate_scaled_laplacian(adj_mx, lambda_max=None)) for support in supports: self._supports.append(self._build_sparse_matrix(support)) @staticmethod def _build_sparse_matrix(L): L = L.tocoo() indices = np.column_stack((L.row, L.col)) L = tf.SparseTensor(indices, L.data, L.shape) return tf.sparse_reorder(L) @property def output_size(self): output_size = self._num_nodes * self._num_units if self._num_proj is not None: output_size = self._num_nodes * self._num_proj return output_size @staticmethod
tensorflow.sparse_reorder
482
from tensorflow.python.framework import ops img_path = sys.argv[1] img = image.load_img(img_path, target_size=(224,224)) #299,299)) #224, 224)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) return x def register_gradient(): if "GuidedBackProp" not in ops._gradient_registry._registry: @ops.RegisterGradient("GuidedBackProp") def _GuidedBackProp(op, grad): dtype = op.inputs[0].dtype return grad * tf.cast(grad > 0., dtype) * \ tf.cast(op.inputs[0] > 0., dtype) def compile_saliency_function(model, activation_layer='block5_conv3'): #mixed10 'activation_49' add_16 add_32 activation_98 input_img = model.input layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
tensorflow.python.framework.ops.RegisterGradient
483
import tensorflow as tf of true images in the resized images, as resized images can be padded with zeros. Returns: prediction_dict: a dictionary holding prediction tensors to be passed to the Loss or Postprocess functions. """ flattened_inputs = tf.contrib.layers.flatten(preprocessed_inputs) class_prediction = tf.contrib.layers.fully_connected( flattened_inputs, self._num_classes) box_prediction = tf.contrib.layers.fully_connected(flattened_inputs, 4) return { 'class_predictions_with_background': tf.reshape( class_prediction, [-1, 1, self._num_classes]), 'box_encodings': tf.reshape(box_prediction, [-1, 1, 4])
tensorflow.contrib.layers.fully_connected
484
from tensorflow.contrib.learn.python.learn.estimators import composable_model num_ps_replicas = config.num_ps_replicas if config else 0 self._linear_model = composable_model.LinearComposableModel( num_label_columns=target_column.num_label_columns,
tensorflow.contrib.learn.python.learn.estimators.composable_model.LinearComposableModel
485
import tensorflow as tf group_size: int, size of image groups. Returns: Minibatch standard deviation feature map image added to channels of shape [cur_batch_size, image_size, image_size, 1]. """ with tf.variable_scope( "{}/grouped_minibatch_stddev".format(self.name)): # The group size should be less than or equal to the batch size. # shape = () group_size = tf.minimum( x=group_size, y=cur_batch_size, name="group_size" ) print_obj("grouped_minibatch_stddev", "group_size", group_size) # Split minibatch into M groups of size group_size, rank 5 tensor. # shape = ( # group_size, # cur_batch_size / group_size, # image_size, # image_size,
tensorflow.minimum
486
from tensorflow.python.ops import state_ops total_compute_op = state_ops.assign_add(total, math_ops.reduce_sum(values)) count_compute_op = state_ops.assign_add(count, num_values)
tensorflow.python.ops.state_ops.assign_add
487
import tensorflow as tf pred_max = tf.reduce_max(predictions, axis=-1) pred_indices = tf.argmax(predictions, axis=-1) pred_x, pred_y = tf.cast(tf.floormod(pred_indices, heatmap_size), tf.float32), tf.cast(tf.floordiv(pred_indices, heatmap_size), tf.float32) width, height = tf.cast(width, tf.float32), tf.cast(height, tf.float32)
tensorflow.floordiv
488
import tensorflow as tf one_hot_labels = tf.one_hot(
tensorflow.one_hot
489
from tensorflow.python.ops import nn if not target.dtype.is_integer: raise ValueError("Target's dtype should be integer " "Instead got %s." % target.dtype) # sparse_softmax_cross_entropy_with_logits requires [batch_size] target. if len(target.get_shape()) == 2: target = array_ops.squeeze(target, squeeze_dims=[1]) loss_vec = nn.sparse_softmax_cross_entropy_with_logits( labels=target, logits=logits) return loss_vec def _run_metrics(predictions, labels, metrics, weights):
tensorflow.python.ops.nn.sparse_softmax_cross_entropy_with_logits
490
from tensorflow.python.training import summary_io super(SummarySaver, self).__init__(every_n_steps=save_steps) self._summary_op = summary_op self._summary_writer = summary_writer if summary_writer is None and output_dir: self._summary_writer = summary_io.SummaryWriter(output_dir) self._scaffold = scaffold # TODO(mdan): Throw an error if output_dir and summary_writer are None.
tensorflow.python.training.summary_io.SummaryWriter
491
import tensorflow as tf "input_mask": tf.FixedLenFeature([seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), "label_ids": tf.FixedLenFeature([], tf.int64), "is_real_example": tf.FixedLenFeature([], tf.int64), } def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t)
tensorflow.parse_single_example
492
from tensorflow.python.framework import ops if as_ref: return self.handle else: return self.read_value() # Register a conversion function which reads the value of the variable, # allowing instances of the class to be used as tensors. def _tensor_conversion(var, dtype=None, name=None, as_ref=False): return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access ops.register_tensor_conversion_function(ReplicatedVariable, _tensor_conversion) if not TF_23: ops.register_dense_tensor_like_type(ReplicatedVariable)
tensorflow.python.framework.ops.register_tensor_conversion_function
493
import tensorflow as tf unions = ( tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections) return tf.where( tf.equal(intersections, 0.0), tf.zeros_like(intersections), tf.truediv(intersections, unions))
tensorflow.truediv
494
import tensorflow as tf self.alpha_c = alpha_c self.selector = selector self.dropout = dropout self.V = len(word_to_idx) self.L = dim_feature[0] self.D = dim_feature[1] self.M = dim_embed self.H = dim_hidden self.T = n_time_step self._start = word_to_idx['<START>'] self._null = word_to_idx['<NULL>'] self.weight_initializer = tf.contrib.layers.xavier_initializer() self.const_initializer = tf.constant_initializer(0.0) self.emb_initializer = tf.random_uniform_initializer(minval=-1.0, maxval=1.0) # Place holder for features and captions self.features = tf.placeholder(tf.float32, [None, self.L, self.D]) self.captions = tf.placeholder(tf.int32, [None, self.T + 1]) def _get_initial_lstm(self, features): with tf.variable_scope('initial_lstm'): features_mean = tf.reduce_mean(features, 1) w_h = tf.get_variable('w_h', [self.D, self.H], initializer=self.weight_initializer) b_h = tf.get_variable('b_h', [self.H], initializer=self.const_initializer) h = tf.nn.tanh(tf.matmul(features_mean, w_h) + b_h)
tensorflow.random_uniform_initializer
495
from tensorflow.python.ops import array_ops bg_row=tf.shape(b_grads[0])[0] bg_col=tf.shape(b_grads[0])[1] b_grads = tf.reshape(b_grads, (numTensors * bg_row, bg_col)) if adj_b: b_grads = [array_ops.transpose(b_g) for b_g in b_grads] for t in range(numTensors): rows = a_indices[t][:, 0] cols = a_indices[t][:, 1] parts_a = array_ops.gather(grad[t], rows if not adj_a else cols) parts_b = array_ops.gather(b_list[t] if not adj_b else array_ops.transpose(b_list[t]), cols if not adj_a else rows) a_values_grads.append(math_ops.reduce_sum(parts_a * parts_b, reduction_indices=1)) return_val = [None for _ in range(numTensors)] + a_values_grads + [None for _ in range(numTensors)] + [b_grads] return tuple(return_val)
tensorflow.python.ops.array_ops.gather
496
from tensorflow.python.framework import tensor_shape return [op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())] return [op.inputs[1].get_shape()] @ops.RegisterShape("AssignAdd") @ops.RegisterShape("AssignSub") def _AssignUpdateShape(op): """Shape function for the AssignAdd and AssignSub dense update ops.""" return [op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())] @ops.RegisterShape("CountUpTo") def _CountUpToShape(op): """Shape function for the CountUpTo op.""" return [op.inputs[0].get_shape().merge_with(tensor_shape.scalar())] @ops.RegisterShape("ScatterAdd") @ops.RegisterShape("ScatterSub") @ops.RegisterShape("ScatterUpdate") def _ScatterUpdateShape(op): """Shape function for the sparse update ops.""" var_shape = op.inputs[0].get_shape() indices_shape = op.inputs[1].get_shape() unused_updates_shape = op.inputs[2].get_shape().merge_with( indices_shape.concatenate(var_shape[1:])) return [var_shape]
tensorflow.python.framework.tensor_shape.scalar
497
import tensorflow as tf # summaries for TensorBoard visualisation validation_summary = tf.summary.merge([img_summary, acc_summary]) training_summary = tf.summary.merge([img_summary, loss_summary]) test_summary = tf.summary.merge([img_summary, acc_summary]) # saver for checkpoints saver = tf.train.Saver(tf.global_variables(), max_to_keep=1) with tf.Session() as sess: summary_writer = tf.summary.FileWriter(run_log_dir + '_train', sess.graph, flush_secs=5) summary_writer_validation = tf.summary.FileWriter(run_log_dir + '_validate', sess.graph, flush_secs=5) sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) # Training and validation for step in range(FLAGS.max_steps): # Training: Backpropagation using train set (trainImages, trainLabels) = cifar.getTrainBatch() (testImages, testLabels) = cifar.getTestBatch() _, summary_str = sess.run([optimiser, training_summary], feed_dict={x: trainImages, y_: trainLabels, train: True}) if step % (FLAGS.log_frequency + 1) == 0: summary_writer.add_summary(summary_str, step)
tensorflow.local_variables_initializer
498
from tensorflow.python.ops import gradients linear_vars = self._get_linear_vars() dnn_vars = self._get_dnn_vars() grads = gradients.gradients(loss, dnn_vars + linear_vars) dnn_grads = grads[0:len(dnn_vars)]
tensorflow.python.ops.gradients.gradients
499