Columns:
  seed      string   length 25 – 1.88k
  seed_api  string   length 14 – 102
  index     int64    0 – 1.05k
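Each record below pairs a code snippet (`seed`) with the fully qualified TensorFlow API it exercises (`seed_api`) and its row `index`. As a minimal sketch of how such records might be loaded and inspected, assuming they ship as a Hugging Face dataset (the path "org/seed-api-records" and the split name below are hypothetical placeholders, not the real identifiers):

from datasets import load_dataset  # Hugging Face `datasets` library

# Hypothetical dataset path; substitute the real identifier.
ds = load_dataset("org/seed-api-records", split="train")

row = ds[0]
print(row["index"])     # int64 row index, e.g. 1000
print(row["seed_api"])  # fully qualified API name, e.g. "tensorflow.scalar_summary"
print(row["seed"])      # multi-line code snippet that exercises that API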
import tensorflow as tf
correct_prediction_action = tf.equal(
    tf.argmax(one_hot_labels_action, 1),
    tf.argmax(self.predictions_action, 1)
)
self.accuracy_action = tf.reduce_mean(tf.cast(correct_prediction_action, 'float'))
tf.scalar_summary('accuracy_action', self.accuracy_action)

correct_prediction_arguments = tf.equal(tf.argmax(one_hot_labels_arguments, 2),
                                        tf.argmax(self.predictions_arguments, 2))
self.accuracy_arguments = tf.reduce_mean(tf.cast(correct_prediction_arguments, 'float'))
tensorflow.scalar_summary
1,000
import tensorflow as tf
    edge_types: A 1-D `Tensor` of int32. Specify edge types to filter outgoing edges.
  Return:
    A tuple of `SparseTensor` (neibors, weights).
    neighbors: A `SparseTensor` of `int64`.
    weights: A `SparseTensor` of `float`.
    types: A `SparseTensor` of `int32`
  """
  sp_returns = base._LIB_OP.get_full_neighbor(nodes, edge_types)
  return tf.SparseTensor(*sp_returns[:3]), tf.SparseTensor(*sp_returns[3:6]), \
         tf.SparseTensor(*sp_returns[6:])

def get_sorted_full_neighbor(nodes, edge_types):
  """
  Args:
    nodes: A `Tensor` of `int64`.
    edge_types: A 1-D `Tensor` of int32. Specify edge types to filter outgoing edges.
  Return:
tensorflow.SparseTensor
1,001
from tensorflow.python.ops import state_ops
new_value = array_ops.zeros(next_shape, dtype=values.dtype)
old_value = array.value()
assign_op = state_ops.assign(array, new_value, validate_shape=False)
with ops.control_dependencies([assign_op]):
tensorflow.python.ops.state_ops.assign
1,002
import tensorflow as tf
  # and due to the fact that the rightmost boundary is essentially ignored.
  boundaries = tf.expand_dims(tf.cast(boundaries, tf.float32), 0) - 0.0001

  bucket_indices = tf_utils.assign_buckets(
      tf.cast(x, tf.float32), remove_leftmost_boundary(boundaries))

  bucket_vocab, counts = count_per_key(tf.strings.as_string(bucket_indices))
  counts = tf_utils.reorder_histogram(bucket_vocab, counts,
                                      tf.size(boundaries) - 1)
  return counts, boundaries
tensorflow.strings.as_string
1,003
import tensorflow as tf
import gpflow
from gpflow.ci_utils import ci_niter
from gpflow import set_trainable
from multiclass_classification import plot_from_samples, colors

gpflow.config.set_default_float(np.float64)
gpflow.config.set_default_jitter(1e-4)
gpflow.config.set_default_summary_fmt("notebook")
# convert to float64 for tfp to play nicely with gpflow in 64
f64 = gpflow.utilities.to_default_float

tf.random.set_seed(123)

# %matplotlib inline

# %% [markdown]
#
# In this notebook, we provide three examples:
#
# * [Example 1](#Example-1:-GP-regression): Sampling hyperparameters in Gaussian process regression
# * [Example 2](#Example-2:-Sparse-MC-for-multiclass-classification): Sparse Variational MC applied to the multiclass classification problem
# * [Example 3](#Example-3:-Fully-Bayesian-inference-for-generalized-GP-models-with-HMC): Full Bayesian inference for Gaussian process models
tensorflow.random.set_seed
1,004
import tensorflow as tf
from sklearn.metrics import classification_report

slim = tf.contrib.slim
global first
first = True
classnum=12
testnum = tf.placeholder(tf.int32)
trainnum = tf.placeholder(tf.int32)
validnum = tf.placeholder(tf.int32)
learnrate = tf.placeholder(tf.float32)

def getinputs(path):
    filename_queue=tf.train.string_input_producer([path])
    reader=tf.TFRecordReader()
    _,serialized_example=reader.read(filename_queue)
    features=tf.parse_single_example(serialized_example,
                                     features={
                                         'label':tf.FixedLenFeature([], tf.int64),
                                         'img_raw' : tf.FixedLenFeature([], tf.string),
                                     })
    image=tf.decode_raw(features['img_raw'],tf.uint8)
    label=tf.cast(features['label'],tf.int32)
    image=tf.reshape(image,[4096,1])
    return image,label

def get_batch(image,label,batch_size,crop_size):
    #print(image.shape)
    #print(label.shape)
tensorflow.TFRecordReader
1,005
import tensorflow as tf
      cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.GRUCell(24)] * 2,
                                         state_is_tuple=True)
      return tf.nn.seq2seq.embedding_attention_seq2seq(
          enc_inp, dec_inp, cell, num_encoder_symbols=classes,
          num_decoder_symbols=classes, embedding_size=24,
          output_projection=(w, b))

    targets = [dec_inp[i+1] for i in range(len(dec_inp) - 1)] + [0]

    def SampledLoss(labels, inputs):
      labels = tf.reshape(labels, [-1, 1])
      return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, 8, classes)

    return tf.nn.seq2seq.model_with_buckets(
        enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq,
        softmax_loss_function=SampledLoss)

  # Now we construct the copy model.
  batch_size = 8
  inp = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
  out = [tf.placeholder(tf.int32, shape=[None]) for _ in range(8)]
tensorflow.nn.sampled_softmax_loss
1,006
from tensorflow.python.platform import gfile
      s3 = save.save(sess, os.path.join(save_dir, "s3"))
      self.assertEqual([s2, s3], save.last_checkpoints)
      self.assertEqual(0, len(gfile.Glob(s1)))
      self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
      self.assertEqual(2, len(gfile.Glob(s2)))
tensorflow.python.platform.gfile.Glob
1,007
from tensorflow.python.framework import ops
  """Calculates the on-disk weight parameters for BiasAdd."""
  bias_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])
  bias_shape.assert_is_fully_defined()
  bias_count = np.prod(bias_shape.as_list())
  return ops.OpStats("weight_parameters", bias_count)

def xw_plus_b(x, weights, biases, name=None):  # pylint: disable=invalid-name
tensorflow.python.framework.ops.OpStats
1,008
import tensorflow as tf
                                            initializer = tf.contrib.layers.variance_scaling_initializer(), stride=2, bn=True, training=self.is_training)# 14*14
        self.deconv_2 = self.deconv_bn_relu(self.deconv_1, name = 'deconv_2',kernel_size = 3, output_channels = 512,
                                            initializer = tf.contrib.layers.variance_scaling_initializer(), stride=2, bn=True, training=self.is_training)# 28*28
        self.deconv_3 = self.deconv_bn_relu(self.deconv_2, name = 'deconv_3',kernel_size = 3, output_channels = 256,
                                            initializer = tf.contrib.layers.variance_scaling_initializer(), stride=2, bn=True, training=self.is_training)# 56*56
        self.deconv_4 = self.deconv_bn_relu(self.deconv_3, name = 'deconv_4',kernel_size = 3, output_channels = 128,
                                            initializer =tf.contrib.layers.variance_scaling_initializer(), stride=2, bn=True, training=self.is_training)# 112*112
        self.deconv_5 = self.deconv_bn_relu(self.deconv_4, name = 'deconv_5',kernel_size = 3, output_channels = 64,
                                            initializer =tf.contrib.layers.variance_scaling_initializer(), stride=2, bn=True, training=self.is_training)# 224*224
        # self.final_layer = self.conv_layer(bottom = self.deconv_5, kernal_size = 1, in_channels = 64, out_channels = 3, stride = 1, name = 'final_layer')
        self.final_layer = self.conv_bn_relu(bottom = self.deconv_5, name = 'final_layer', kernel_size = 1, output_channels = 3,
                                             initializer =tf.contrib.layers.variance_scaling_initializer(), bn = False, training = self.is_training, relu=False)
tensorflow.contrib.layers.variance_scaling_initializer
1,009
import tensorflow as tf
        drop_remainder=predict_drop_remainder)

    result = estimator.predict(input_fn=predict_input_fn)

    output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
    with tf.gfile.GFile(output_predict_file, "w") as writer:
      tf.logging.info("***** Predict results *****")
      for prediction in result:
        output_line = "\t".join(
            str(class_probability) for class_probability in prediction) + "\n"
tensorflow.gfile.GFile
1,010
import tensorflow as tf
  if FLAGS.write_to_disk:
    image_write_ops = tf.write_file(
        '%s/%s'% (FLAGS.eval_dir, 'conditional_gan.png'),
        tf.image.encode_png(data_provider.float_image_to_uint8(
            reshaped_img[0])))

  # For unit testing, use `run_eval_loop=False`.
  if not run_eval_loop: return
  tf.contrib.training.evaluate_repeatedly(
      FLAGS.checkpoint_dir,
      hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
             tf.contrib.training.StopAfterNEvalsHook(1)],
      eval_ops=image_write_ops,
      max_number_of_evaluations=FLAGS.max_number_of_evaluations)

def _get_generator_inputs(num_images_per_class, num_classes, noise_dims):
  # Since we want a grid of numbers for the conditional generator, manually
  # construct the desired class labels.
  num_images_generated = num_images_per_class * num_classes
  noise = tf.random_normal([num_images_generated, noise_dims])
  labels = [lbl for lbl in range(num_classes) for _
tensorflow.contrib.training.StopAfterNEvalsHook
1,011
from tensorflow.python.ops import check_ops
                          self.event_ndims):
        ndims = tensor_util.constant_value(ndims)
        sample_ndims = (ndims - self._batch_ndims_static -
                        self._event_ndims_static)
        if sample_ndims < 0:
          raise ValueError(
              "expected batch_ndims(%d) + event_ndims(%d) <= ndims(%d)" %
              (self._batch_ndims_static, self._event_ndims_static, ndims))
        return ops.convert_to_tensor(sample_ndims, name="sample_ndims")
      else:
        with ops.name_scope(name="sample_ndims"):
          sample_ndims = ndims - self.batch_ndims - self.event_ndims
          if self.validate_args:
            sample_ndims = control_flow_ops.with_dependencies(
                [check_ops.assert_non_negative(sample_ndims)], sample_ndims)
        return sample_ndims

  def get_dims(self, x, name="get_dims"):
    """Returns dimensions indexing `sample_shape`, `batch_shape`, `event_shape`.

    Example:

    ```python
    x = ...  # Tensor with shape [4, 3, 2, 1]
    sample_dims, batch_dims, event_dims = _DistributionShape(
        batch_ndims=2, event_ndims=1).get_dims(x)
    # sample_dims == [0]
    # batch_dims == [1, 2]
tensorflow.python.ops.check_ops.assert_non_negative
1,012
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
    config = test_configs[config_name]
    num_layers = config["num_layers"]
    num_units = config["num_units"]
    batch_size = config["batch_size"]
    seq_length = config["seq_length"]

    with ops.Graph().as_default(), ops.device("/device:GPU:0"):
      model = cudnn_rnn_ops.CudnnLSTM(num_layers, num_units, num_units)
      params_size_t = model.params_size()
      input_data = variables.Variable(
          array_ops.ones([seq_length, batch_size, num_units]))
      input_h = variables.Variable(
          array_ops.ones([num_layers, batch_size, num_units]))
      input_c = variables.Variable(
tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops.CudnnLSTM
1,013
import tensorflow as tf
print ("creating protobuf...")
g_1 = tf.get_default_graph()
with tf.Session(graph = g_1) as sess:
    saver = tf.train.import_meta_graph('save/model.ckpt.meta', clear_devices=True)
    saver.restore(sess, ckpt_name)
    graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, dst_nodes)
    tf.train.write_graph(tf.graph_util.extract_sub_graph(graph_def, dst_nodes), path, fname, as_text=False)
tensorflow.graph_util.convert_variables_to_constants
1,014
import tensorflow.contrib.graph_editor as ge
    bwd_inputs = [t for op in bwd_ops for t in op.inputs]
    # list of tensors in forward graph that is in input to bwd graph
    ts_filtered = list(set(bwd_inputs).intersection(ts_all))
    debug_print("Using tensors %s", ts_filtered)

    # try two slightly different ways of getting bottlenecks tensors
    # to checkpoint
    for ts in [ts_filtered, ts_all]:

        # get all bottlenecks in the graph
        bottleneck_ts = []
        for t in ts:
            b = set(ge.get_backward_walk_ops(t.op, inclusive=True, within_ops=fwd_ops))
            f = set(ge.get_forward_walk_ops(t.op, inclusive=False, within_ops=fwd_ops))
            # check that there are not shortcuts
            b_inp = set([inp for op in b for inp in op.inputs]).intersection(ts_all)
            f_inp = set([inp for op in f for inp in op.inputs]).intersection(ts_all)
            if not set(b_inp).intersection(f_inp) and len(b_inp)+len(f_inp) >= len(ts_all):
                bottleneck_ts.append(t)  # we have a bottleneck!
            else:
                debug_print("Rejected bottleneck candidate and ops %s",
                            [t] + list(set(ts_all) - set(b_inp) - set(f_inp)))

        # success? or try again without filtering?
        if len(bottleneck_ts) >= np.sqrt(len(ts_filtered)):  # yes, enough bottlenecks found!
            break
tensorflow.contrib.graph_editor.get_forward_walk_ops
1,015
from tensorflow.python.ops import logging_ops
    Returns:
      Numpy array of predicted probabilities.
    """
    return self._infer_model(x=x, input_fn=input_fn, batch_size=batch_size)

  def _get_train_ops(self, features, targets):
    """See base class."""
    global_step = variables.get_global_step()
    assert global_step
    loss = self._loss(
        self._logits(features), targets, self._get_weight_tensor(features))
    logging_ops.scalar_summary("loss", loss)

    linear_vars = self._get_linear_vars()
    dnn_vars = self._get_dnn_vars()
    grads = gradients.gradients(loss, dnn_vars + linear_vars)
    dnn_grads = grads[0:len(dnn_vars)]
    linear_grads = grads[len(dnn_vars):]

    train_ops = self._get_linear_training_ops(
        linear_grads, linear_vars) + self._get_dnn_training_ops(dnn_grads, dnn_vars)
tensorflow.python.ops.logging_ops.scalar_summary
1,016
from tensorflow.python.platform import tf_logging as logging
    # The code should probably use the step from the checkpoint, because
    # that's what is being evaluated.
    if self._estimator is None:
      raise ValueError("Missing call to set_estimator.")
    # Check that we are not running evaluation on the same checkpoint.
    latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
    if latest_path is None:
      logging.debug("Skipping evaluation since model has not been saved yet "
                    "at step %d.", step)
      return False
    if latest_path is not None and latest_path == self._latest_path:
      logging.debug("Skipping evaluation due to same checkpoint %s for step %d "
                    "as for step %d.", latest_path, step, self._latest_path_step)
      return False
    self._latest_path = latest_path
    self._latest_path_step = step

    # Run evaluation and log it.
    validation_outputs = self._estimator.evaluate(
        x=self.x, y=self.y, input_fn=self.input_fn, batch_size=self.batch_size,
        steps=self.eval_steps, metrics=self.metrics, name=self.name)
tensorflow.python.platform.tf_logging.debug
1,017
import tensorflow as tf
            print("episodes %d" % len(episode_rewards))
            print("exploration %f" % exploration.value(t))
            print("learning_rate %f" % optimizer_spec.lr_schedule.value(t))

            mean_rew_summ = tf.Summary(value=[tf.Summary.Value(tag='mean_rew',simple_value=mean_episode_reward)])
            best_mean_rew_summ = tf.Summary(value=[tf.Summary.Value(tag='best_mean_rew',simple_value=best_mean_episode_reward)])
            writer.add_summary(mean_rew_summ, global_step=t)
tensorflow.Summary.Value
1,018
import tensorflow as tf
#   the combined gradients to all towers (depending on --use_nccl option).
#   independent: each GPU has its own copy of the variables, and gradients are
#     not shared between towers. This can be used to check performance when no
#     data is moved between GPUs.
#   distributed_replicated: Distributed training only. Each GPU has a copy of
#     the variables, and updates its copy after the parameter servers are all
#     updated with the gradients from all servers. Only works with
#     cross_replica_sync=true. Unlike 'replicated', currently never uses
#     nccl all-reduce for replicating within a server.
tf.flags.DEFINE_string(
    'variable_update', 'parameter_server',
    ('The method for managing variables: '
     'parameter_server, replicated, distributed_replicated, independent'))
tf.flags.DEFINE_boolean(
    'use_nccl', True,
    'Whether to use nccl all-reduce primitives where possible')

# Distributed training flags.
tf.flags.DEFINE_string('job_name', '',
                       'One of "ps", "worker", "". Empty for local training')
tf.flags.DEFINE_string('ps_hosts', '', 'Comma-separated list of target hosts')
tf.flags.DEFINE_string('worker_hosts', '', 'Comma-separated list of target hosts')
tf.flags.DEFINE_integer('task_index', 0, 'Index of task within the job')
tf.flags.DEFINE_string('server_protocol', 'grpc', 'protocol for servers')
tf.flags.DEFINE_boolean('cross_replica_sync', True, '')
tensorflow.flags.DEFINE_boolean
1,019
import tensorflow as tf

def validation():
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    images = tf.convert_to_tensor(np.expand_dims(x_test/255.0, -1),dtype=tf.float32)
tensorflow.keras.datasets.mnist.load_data
1,020
import tensorflow as tf
    # Reshape patches.
    p = tf.reshape(p, [blk_shape[0], blk_shape[1], blk_shape[2], -1])

    # Convolution on patches.
    q = tf.nn.conv2d(p, w, strides, 'VALID', use_cudnn_on_gpu=True)

    # Paste convolution results.
    q_shape = tf.shape(q)

    def _strides_gt_one():
        # Calculate output indices when strides > 1.
        blk_indices_crop = tf.strided_slice(blk_indices, [0, 0, 0, 0], [
            blk_shape[0], q_shape[1] * strides[1], q_shape[2] * strides[2], 3
        ], strides)
        blk_indices_crop = blk_indices_crop // tf.stack([1, strides[1], strides[2]])
        return blk_indices_crop

    def _strides_one():
        # Calculate otuput indices when strides = 1.
        return blk_indices[:, :q_shape[1], :q_shape[2], :]

    strides_gt_one = tf.logical_or(tf.greater(strides[1], 1), tf.greater(strides[2], 1))
tensorflow.strided_slice
1,021
from tensorflow.core.protobuf import meta_graph_pb2
      v0 = tf.Variable(10.0, name="v0")

      # Creates a saver.
      save = tf.train.Saver({"v0": v0})

      # Generates MetaGraphDef.
      meta_graph_def = meta_graph_pb2.MetaGraphDef()

      # Verifies that collection with unsupported key will not be added.
      tf.add_to_collection(save, 3)
tensorflow.core.protobuf.meta_graph_pb2.MetaGraphDef
1,022
from tensorflow import keras

# Create inference model using Keras
# The model here is a dnn regressor
def make_keras_estimator(output_dir):
  from tensorflow import keras
  model = keras.models.Sequential()
  model.add(keras.layers.Dense(32, input_shape=(N_INPUTS,), name=TIMESERIES_INPUT_LAYER))
  model.add(keras.layers.Activation('relu'))
  model.add(keras.layers.Dense(1))
  model.compile(loss = 'mean_squared_error',
                optimizer = 'adam',
                metrics = ['mae', 'mape'])  # mean absolute [percentage] error
  return keras.estimator.model_to_estimator(model, model_dir=output_dir)

# Create the inference model
def simple_rnn(features, labels, mode):
  # 0. Reformat input shape to become a sequence
  x = tf.split(features[TIMESERIES_COL], N_INPUTS, 1)

  # 1. Configure the RNN
  lstm_cell = rnn.BasicLSTMCell(LSTM_SIZE, forget_bias = 1.0)
  outputs, _ = rnn.static_rnn(lstm_cell, x, dtype = tf.float32)
tensorflow.keras.estimator.model_to_estimator
1,023
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/")
tensorflow.examples.tutorials.mnist.input_data.read_data_sets
1,024
import tensorflow as tf
                           'num of residual units')
tf.app.flags.DEFINE_string('Optimizer', 'mom',
                           'The optimizer used to train the model.')
tf.app.flags.DEFINE_bool('RCE_train', False,
                         'Whether use RCE to train the model.')
tf.app.flags.DEFINE_string('attack_method', 'fgsm',
tensorflow.app.flags.DEFINE_bool
1,025
from tensorflow.python.ops import script_ops
    with g.as_default():
      c = tf.constant([1.], tf.float32)
      _ = tf.py_func(lambda x: x + 1, [c], [tf.float32])

    self.assertTrue(script_ops._py_funcs.size() < 100)

  def testError(self):
tensorflow.python.ops.script_ops._py_funcs.size
1,026
from tensorflow.python.client import session
  def testSparseDistributed(self):
    worker, unused_ps = self._setupCluster()
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with session.Session(worker.target):
        var0, var1, update_op = self._setupSparse(True, dtype)
        self._assertSparseCorrect(var0, var1, update_op)
tensorflow.python.client.session.Session
1,027
import tensorflow as tf
                      cols[3] / height,
                      cols[2] / width], axis=1)
    # add batch dimension (assume batch_size==1)
    #assert image.get_shape()[0] == 1
    boxes = tf.expand_dims(boxes, dim=0)
    image = tf.image.draw_bounding_boxes(image, boxes)  # draw gt_truth on the image

    return tf.summary.image('ground_truth', image)

  def _add_act_summary(self, tensor):
    tf.summary.histogram('ACT/' + tensor.op.name + '/activations', tensor)
    tf.summary.scalar('ACT/' + tensor.op.name + '/zero_fraction',
                      tf.nn.zero_fraction(tensor))
tensorflow.summary.image
1,028
import tensorflow as tf
        # Create optimizer
        opt = tf.train.AdamOptimizer(learning_rate,
                                     beta1=params.adam_beta1,
                                     beta2=params.adam_beta2,
                                     epsilon=params.adam_epsilon)

        if params.update_cycle == 1:
            train_op = tf.contrib.layers.optimize_loss(
                name="training",
                loss=loss,
                global_step=global_step,
                learning_rate=learning_rate,
                clip_gradients=params.clip_grad_norm or None,
                optimizer=opt,
                colocate_gradients_with_ops=True
tensorflow.contrib.layers.optimize_loss
1,029
from tensorflow.python.framework import ops
  input_shape.assert_is_fully_defined()
  filter_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])
  filter_shape.assert_is_fully_defined()
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  filter_height = int(filter_shape[0])
  filter_width = int(filter_shape[1])
  filter_in_depth = int(filter_shape[2])
  filter_out_depth = int(filter_shape[3])
  return ops.OpStats("weight_parameters", (filter_height * filter_width *
                                           filter_in_depth * filter_out_depth))

@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
  """Calculates the computing needed for BiasAdd."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  input_count = np.prod(input_shape.as_list())
  return ops.OpStats("flops", input_count)

@ops.RegisterStatistics("BiasAdd", "weight_parameters")
def _calc_bias_add_weight_params(graph, node):
  """Calculates the on-disk weight parameters for BiasAdd."""
  bias_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])
  bias_shape.assert_is_fully_defined()
tensorflow.python.framework.ops.RegisterStatistics
1,030
import tensorflow as tf

def double_factorial(n: TensorLike) -> TensorLike:
  n = tf.convert_to_tensor(value=n)
  two = tf.ones_like(n) * 2
  result = tf.ones_like(n)
  _, result, _ = tf.while_loop(
      cond=_double_factorial_loop_condition,
      body=_double_factorial_loop_body,
      loop_vars=[n, result, two])
tensorflow.ones_like
1,031
import tensorflow as tf
    indices_input = tf.reshape(indices_input, [2, -1])
    indices_input = tf.transpose(indices_input)

    res = tf.sparse_to_dense(
        indices_input, [n_elem, n_indices], 1., 0., name="flat_one_hot")
tensorflow.sparse_to_dense
1,032
import tensorflow as tf
      v1 = tf.Variable([20.0], name="v1")
      v2 = tf.Variable([20.0], name="v2")
      v2._set_save_slice_info(tf.Variable.SaveSliceInfo("v1", [1], [0], [1]))
tensorflow.Variable.SaveSliceInfo
1,033
from tensorflow.python.ops import gen_nn_ops
    logits: Unscaled log probabilities.
    labels: Each entry `labels[i]` must be an index in `[0, num_classes)`.
    name: A name for the operation (optional).

  Returns:
    A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the
    softmax cross entropy loss.
  """
  # The second output tensor contains the gradients. We use it in
  # _CrossEntropyGrad() in nn_grad but not here.
  cost, unused_backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
      logits, labels, name=name)
  return cost

@ops.RegisterShape("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsShape(op):
  """Shape function for SparseSoftmaxCrossEntropyWithLogits op."""
  logits_shape = op.inputs[0].get_shape()
  input_shape = logits_shape.with_rank(2)
tensorflow.python.ops.gen_nn_ops._sparse_softmax_cross_entropy_with_logits
1,034
import tensorflow as tf
    Based on:
        https://github.com/gitlimlab/CycleGAN-Tensorflow/blob/master/ops.py
    For tf padding, refer to:
        https://www.tensorflow.org/api_docs/python/tf/pad
    """
    reg_l2 = tf.keras.regularizers.l2(5e-7)

    if padding == 'SYMMETRIC' or padding == 'REFLECT':
        p = (kernel_size - 1) // 2
tensorflow.keras.regularizers.l2
1,035
import tensorflow as tf

def load_graph(model_file):
  graph = tf.Graph()
  graph_def = tf.compat.v1.GraphDef()
  import os
  file_ext = os.path.splitext(model_file)[1]

  with open(model_file, "rb") as f:
    if file_ext == '.pbtxt':
      text_format.Merge(f.read(), graph_def)
    else:
      graph_def.ParseFromString(f.read())
  with graph.as_default():
    tf.import_graph_def(graph_def, name='')
    tf.io.write_graph(graph_def, '/tmp/', 'optimized_graph.pb',as_text=False)

  return graph

if __name__ == "__main__":
  parser = argparse.ArgumentParser()
  parser.add_argument("--input_graph", default=None,
                      help="graph/model to be executed")
  parser.add_argument("--data_location", default=None,
                      help="full path to the validation data")
  parser.add_argument("--input_height", default=None, type=int,
                      help="input height")
tensorflow.import_graph_def
1,036
from tensorflow.contrib.framework import deprecated_arg_values

class ExportMonitor(EveryN):
  """Monitor that exports Estimator every N steps."""

  # TODO(philstahlfeld): Investigate switching export.export_estimator
  # configuration values to **kwargs so that updates to the export_estimator
  # function don't have to be reflected here.
  @deprecated_arg_values(
      "2016-09-23",
      "The signature of the input_fn accepted by export is changing to be "
      "consistent with what's used by tf.Learn Estimator's train/evaluate. "
      "input_fn (and in most cases, input_feature_key) will both become "
      "required args.",
tensorflow.contrib.framework.deprecated_arg_values
1,037
import tensorflow as tf
    # Location predictions.
    location_feature_map_depth = (self._num_spatial_bins[0] *
                                  self._num_spatial_bins[1] *
                                  self.num_classes *
                                  self._box_code_size)
    location_feature_map = slim.conv2d(net, location_feature_map_depth,
                                       [1, 1], activation_fn=None,
                                       scope='refined_locations')
    box_encodings = ops.position_sensitive_crop_regions(
        location_feature_map,
        boxes=tf.reshape(proposal_boxes, [-1, self._box_code_size]),
        box_ind=get_box_indices(proposal_boxes),
        crop_size=self._crop_size,
        num_spatial_bins=self._num_spatial_bins,
        global_pool=True)
    box_encodings = tf.squeeze(box_encodings, squeeze_dims=[1, 2])
    box_encodings = tf.reshape(box_encodings,
                               [batch_size * num_boxes, 1, self.num_classes,
                                self._box_code_size])

    # Class predictions.
    total_classes = self.num_classes + 1  # Account for background class.
    class_feature_map_depth = (self._num_spatial_bins[0] *
                               self._num_spatial_bins[1] *
                               total_classes)
    class_feature_map = slim.conv2d(net, class_feature_map_depth, [1, 1],
                                    activation_fn=None,
                                    scope='class_predictions')
    class_predictions_with_background = ops.position_sensitive_crop_regions(
        class_feature_map,
tensorflow.squeeze
1,038
import tensorflow as tf
      def metric_fn(per_example_loss, label_ids, logits, is_real_example):
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        accuracy = tf.metrics.accuracy(
            labels=label_ids, predictions=predictions, weights=is_real_example)
        loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
tensorflow.metrics.accuracy
1,039
import tensorflow as tf
    features = {
        d.input_ids: tf.io.VarLenFeature(tf.int64),
        d.token_type_ids: tf.io.VarLenFeature(tf.int64),
        d.attention_mask: tf.io.VarLenFeature(tf.int64),
        d.labels: tf.io.VarLenFeature(tf.int64),
    }
    dataset = dataset.map(
        lambda x: tf.io.parse_example(x, features),
        num_parallel_calls=utils.AUTOTUNE,
    ).prefetch(utils.AUTOTUNE)
    dataset = dataset.map(
        lambda x: (
            tf.cast(tf.sparse.to_dense(x[d.input_ids]), tf.int32),
            tf.cast(tf.sparse.to_dense(x[d.token_type_ids]), tf.int32),
tensorflow.io.parse_example
1,040
import tensorflow as tf
                ref=self.internals_memory[name],
                indices=indices,
                updates=internals[name]
            ))
        for name in sorted(actions):
            assignments.append(tf.scatter_update(
                ref=self.actions_memory[name],
                indices=indices,
                updates=actions[name]
            ))
        assignments.append(tf.scatter_update(ref=self.terminal_memory, indices=indices, updates=terminal))
        assignments.append(tf.scatter_update(ref=self.reward_memory, indices=indices, updates=reward))

        # Add episode indices.
        with tf.control_dependencies(control_inputs=assignments):
            num_episodes = tf.count_nonzero(input_tensor=terminal, axis=0, dtype=util.tf_dtype('int'))
            assignment = tf.assign(
                ref=self.episode_indices[self.episode_count: self.episode_count + num_episodes],
                value=tf.boolean_mask(tensor=indices, mask=terminal)
            )
tensorflow.scatter_update
1,041
import tensorflow as tf
        replaced_list = var_list

        if self._scale != 1.0:
            loss = tf.scalar_mul(self._scale, loss)
        gradvar = self._optimizer.compute_gradients(loss, replaced_list, *args, **kwargs)

        final_gradvar = []
        for orig_var, (grad, var) in zip(var_list, gradvar):
            if var is not orig_var:
                grad = tf.cast(grad, orig_var.dtype)
            if self._scale != 1.0:
                grad = tf.scalar_mul(1. / self._scale, grad)
            final_gradvar.append((grad, orig_var))

        return final_gradvar

    def apply_gradients(self, *args, **kwargs):
        return self._optimizer.apply_gradients(*args, **kwargs)

def main(argv=None):
    start1 = time.time()
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list
tensorflow.scalar_mul
1,042
import tensorflow as tf
    An example with the same label and an augmented version of the image.
  """
  image, label = example['image'], example['label']
  image = tf.image.random_flip_left_right(image)
  image_shape = tf.shape(image)
  image = tf.pad(
      image, [[random_crop_pad, random_crop_pad],
              [random_crop_pad, random_crop_pad], [0, 0]],
      mode='REFLECT')
  image = tf.image.random_crop(image, image_shape)
  return {'image': image, 'label': label}

def auto_augmentation(example, dataset_name):
  """Applies the AutoAugment policy found for the dataset.

  AutoAugment: Learning Augmentation Policies from Data
tensorflow.image.random_crop
1,043
import tensorflow as tf
    if not full_cov and full_output_cov:
        fvar = tf.matrix_diag(fvar)  # N x P x P
tensorflow.matrix_diag
1,044
from tensorflow.contrib.layers.python.layers.layers import _build_variable_getter, _add_variable_to_collections
        bias_regularizer=biases_regularizer,
        activity_regularizer=None,
        use_spectral_norm=use_spectral_norm,
        is_training=is_training,
        trainable=trainable,
        name=sc.name,
        dtype=inputs.dtype.base_dtype,
        _scope=sc,
        _reuse=reuse)
    outputs = layer.apply(inputs)

    # Add variables to collections.
    _add_variable_to_collections(layer.kernel, variables_collections, 'weights')
    if layer.use_bias:
      _add_variable_to_collections(layer.bias, variables_collections, 'biases')

    if normalizer_fn is not None:
      normalizer_params = normalizer_params or {}
      outputs = normalizer_fn(outputs, **normalizer_params)

    if activation_fn is not None:
      outputs = activation_fn(outputs)

    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
tensorflow.contrib.layers.python.layers.layers._add_variable_to_collections
1,045
import tensorflow as tf
        # https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
        tp, tp_op = tf.metrics.true_positives(
            predictions, label_ids, weights=is_real_example)
        tn, tn_op = tf.metrics.true_negatives(
            predictions, label_ids, weights=is_real_example)
        fp, fp_op = tf.metrics.false_positives(
            predictions, label_ids, weights=is_real_example)
        fn, fn_op = tf.metrics.false_negatives(
            predictions, label_ids, weights=is_real_example)
tensorflow.metrics.false_positives
1,046
import tensorflow as tf
    Subclasses can override this function in order to preprocess, and can
    yield any number of strings.

    Args:
      filepath: a string

    Yields:
      unicode strings.
    """
    f = tf.gfile.Open(filepath)
    b = f.read()
    yield text_encoder.to_unicode_ignore_errors(b)

  def file_generator(self,
                     filepaths,
                     max_chars_per_file=None,
                     max_chars_total=None):
    """Read complete text of input files and yield unicode strings.
tensorflow.gfile.Open
1,047
import tensorflow as tf
    train_y_1 = to_categorical(train_y_1, n_class_1)
    test_y_1 = to_categorical(test_y_1, n_class_1)
    train_y_2 = to_categorical(train_y_2, n_class_2)
    test_y_2 = to_categorical(test_y_2, n_class_2)
    return train_X, train_y_1, train_y_2, test_X, test_y_1, test_y_2

def apply_cross_stitch(input1, input2):
    input1_reshaped = contrib.layers.flatten(input1)
    input2_reshaped = contrib.layers.flatten(input2)
    input = tf.concat((input1_reshaped, input2_reshaped), axis=1)

    # initialize with identity matrix
    cross_stitch = tf.get_variable("cross_stitch", shape=(input.shape[1], input.shape[1]), dtype=tf.float32,
                                   collections=['cross_stitches', tf.GraphKeys.GLOBAL_VARIABLES],
                                   initializer=tf.initializers.identity())
    output = tf.matmul(input, cross_stitch)

    # need to call .value to convert Dimension objects to normal value
    input1_shape = list(-1 if s.value is None else s.value for s in input1.shape)
    input2_shape = list(-1 if s.value is None else s.value for s in input2.shape)
    output1 = tf.reshape(output[:, :input1_reshaped.shape[1]], shape=input1_shape)
    output2 = tf.reshape(output[:, input1_reshaped.shape[1]:], shape=input2_shape)
    return output1, output2

def main(args):
    train_X, train_y_1, train_y_2, test_X, test_y_1, test_y_2 = load_data()
    m = train_X.shape[0]
tensorflow.initializers.identity
1,048
import tensorflow as tf
    with self.test_session() as sess:
      with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
        inp = [tf.constant(0.5, shape=[2, 2])] * 2
        _, enc_state = tf.nn.rnn(
            tf.nn.rnn_cell.GRUCell(2), inp, dtype=tf.float32)
        dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
        cell = tf.nn.rnn_cell.OutputProjectionWrapper(
            tf.nn.rnn_cell.GRUCell(2), 4)
        dec, mem = tf.nn.seq2seq.rnn_decoder(dec_inp, enc_state, cell)
        sess.run([tf.global_variables_initializer()])
        res = sess.run(dec)
        self.assertEqual(3, len(res))
        self.assertEqual((2, 4), res[0].shape)

        res = sess.run([mem])
        self.assertEqual((2, 2), res[0].shape)
tensorflow.nn.seq2seq.rnn_decoder
1,049
from tensorflow.contrib.distributions.python.ops import distribution_util
      new_shape = array_ops.concat(0, ((-1,), batch_shape, event_shape))
      x = array_ops.reshape(x, shape=new_shape)
      x = distribution_util.rotate_transpose(x, shift=-1)
    return x, sample_shape
tensorflow.contrib.distributions.python.ops.distribution_util.rotate_transpose
1,050
from tensorflow.python.ops import nn
      `false_positives` variables appropriately, and whose value matches
      `precision`.

  Raises:
    ValueError: If `ignore_mask` is not `None` and its shape doesn't match
      `predictions`, or if `weights` is not `None` and its shape doesn't match
      `predictions`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  default_name = _at_k_name('precision', k, class_id=class_id)
  with ops.name_scope(name, default_name,
                      (predictions, labels, ignore_mask, weights)) as scope:
    _, top_k_idx = nn.top_k(predictions, k)
    return _streaming_sparse_precision_at_k(
        top_k_idx=top_k_idx,
        labels=labels,
        k=k,
        class_id=class_id,
        ignore_mask=ignore_mask,
        weights=weights,
        metrics_collections=metrics_collections,
        updates_collections=updates_collections,
        name=scope)
tensorflow.python.ops.nn.top_k
1,051
import tensorflow as tf
    def GetWordPred(o_):
      logits = tf.nn.xw_plus_b(o_, pred_mat, pred_bias)
      return tf.nn.softmax(logits)

    preds = GetWordPred(wvsum)
    z = tf.tile(tf.reshape(tf.reduce_sum(preds,1),[-1,1]), [1, out_vocab_size])
    self.preds, self.z = preds, z
    self.probs = tf.div(preds, z)  #normalize
    self.unweighted_xent = _SafeXEnt(self.y, self.probs)

    self._xent = _SafeXEnt(self.y, self.probs, class_weights=weights)

    self.cost = tf.reduce_mean(self.example_weights * self._xent)
tensorflow.div
1,052
import tensorflow as tf
        imsave(os.path.join(config.DEBUG_DIR, file_name), img.astype(np.uint8))
    return save_image_with_heatmap.counter

def get_keypoint(image, targets, predictions, heatmap_size, height, width, category, clip_at_zero=True, data_format='channels_last', name=None):
    predictions = tf.reshape(predictions, [1, -1, heatmap_size*heatmap_size])

    pred_max = tf.reduce_max(predictions, axis=-1)
    pred_indices = tf.argmax(predictions, axis=-1)
    pred_x, pred_y = tf.cast(tf.floormod(pred_indices, heatmap_size), tf.float32), tf.cast(tf.floordiv(pred_indices, heatmap_size), tf.float32)

    width, height = tf.cast(width, tf.float32), tf.cast(height, tf.float32)
    pred_x, pred_y = pred_x * width / tf.cast(heatmap_size, tf.float32), pred_y * height / tf.cast(heatmap_size, tf.float32)

    if clip_at_zero:
      pred_x, pred_y = pred_x * tf.cast(pred_max>0, tf.float32), pred_y * tf.cast(pred_max>0, tf.float32)
      pred_x = pred_x * tf.cast(pred_max>0, tf.float32) + tf.cast(pred_max<=0, tf.float32) * (width / 2.)
tensorflow.floordiv
1,053