Columns:
  seed      string, length 25 to 2.89k characters
  seed_api  string, length 14 to 102 characters
  index     int64, range 0 to 14.8k
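Each row below pairs a code snippet (seed) with the fully qualified TensorFlow API it exercises (seed_api) and a running row id (index); the snippets are shown with their original newlines collapsed. As a minimal sketch of how records with this schema could be consumed, the following assumes the rows are available locally as a JSON Lines file named seeds.jsonl with one {"seed", "seed_api", "index"} object per line; the file name and storage format are assumptions, not something this preview specifies.

```python
import json
from collections import Counter

# Assumed location/format: one JSON object per line with keys
# "seed", "seed_api", and "index". Adjust to the real storage.
ROWS_PATH = "seeds.jsonl"

def iter_rows(path):
    """Yield each (seed, seed_api, index) record as a dict."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)

if __name__ == "__main__":
    # Example use: count how many seeds exercise each API.
    api_counts = Counter(row["seed_api"] for row in iter_rows(ROWS_PATH))
    for api, count in api_counts.most_common(10):
        print(f"{count:5d}  {api}")
```

Grouping on seed_api this way makes it easy to see which calls are over-represented; in the rows shown here, for instance, tensorflow.variable_scope and tensorflow.shape each appear more than once.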
import tensorflow as tf """ decode_hparams = hook_args.decode_hparams if (decode_hparams.decode_reference is None or decode_hparams.decode_to_file is None): return None values = [] bleu = 100 * bleu_hook.bleu_wrapper( decode_hparams.decode_reference, decode_hparams.decode_to_file) values.append(tf.Summary.Value(tag="BLEU", simple_value=bleu)) tf.logging.info("%s: BLEU = %6.2f" % (decode_hparams.decode_to_file, bleu)) return values def _preprocess_sgm(line, is_sgm): """Preprocessing to strip tags in SGM files.""" if not is_sgm: return line # In SGM files, remove <srcset ...>, <p>, <doc ...> lines.
tensorflow.Summary.Value
14,400
import tensorflow as tf """ mu, var = self.build_posterior_mean_var(X, Y, test_points, True) jitter = tfhacks.eye(tf.shape(mu)[0], var.dtype) * 1e-06 L = tf.batch_cholesky(tf.transpose(var, (2, 0, 1)) + jitter) V_shape = [tf.shape(L)[0], tf.shape(L)[1], num_samples] V = tf.random_normal(V_shape, dtype=L.dtype) samples = tf.expand_dims(tf.transpose(mu), -1) + tf.batch_matmul(L, V) return tf.transpose(samples) #samples = []
tensorflow.shape
14,401
import tensorflow as tf def gen_epochs(n, num_steps): for i in range(n): yield gen_batch(gen_data(), batch_size, num_steps) '''定义placeholder''' x = tf.placeholder(tf.int32, [batch_size, num_steps], name="x") y = tf.placeholder(tf.int32, [batch_size, num_steps], name='y') init_state = tf.zeros([batch_size, state_size]) '''RNN输入''' rnn_inputs = tf.one_hot(x, num_classes) #rnn_inputs = tf.unstack(x_one_hot, axis=1) '''不需要了,使用tensorflow中定义好的cell即可''' #'''定义RNN cell'''
tensorflow.zeros
14,402
import tensorflow as tf assert ob.name in config.optimizers, ob optimizer = config.optimizers[ob.name]( include=ob.include, exclude=ob.exclude, step=trainer.step, log=trainer.log, debug=config.debug, name=ob.name) condition = tf.equal(trainer.phase, 'train') summary, grad_norm = optimizer.maybe_minimize(condition, ob.value) summaries.append(summary) grad_norms[ob.name] = grad_norm return summaries, grad_norms def simulate_episodes(
tensorflow.equal
14,403
import tensorflow as tf layer_output, sequence_lengths, seq_axis=1, batch_axis=0 ) ) with tf.control_dependencies([layer_output]): # update the initial states for i in range(2): new_state = tf.concat( [final_state[i][:batch_size, :], init_states[i][batch_size:, :]], axis=0) state_update_op = tf.assign(init_states[i], new_state) update_ops.append(state_update_op) layer_input = layer_output self.mask = mask self.sequence_lengths = sequence_lengths self.update_state_op = tf.group(*update_ops) def dump_token_embeddings(vocab_file, options_file, weight_file, outfile): ''' Given an input vocabulary file, dump all the token embeddings to the
tensorflow.assign
14,404
import tensorflow as tf run_log_dir = os.path.join(FLAGS.log_dir, 'exp_BN_bs_{bs}_lr_{lr}_aug_flip_brightness'.format(bs=FLAGS.batch_size, lr=FLAGS.learning_rate)) def weight_variable(shape): """weight_variable generates a weight variable of a given shape.""" initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial, name='weights') def bias_variable(shape): """bias_variable generates a bias variable of a given shape.""" initial = tf.constant(0.1, shape=shape) return tf.Variable(initial, name='biases') def deepnn(x, train):
tensorflow.Variable
14,405
import tensorflow as tf with tf.name_scope(scope): weight = tf.convert_to_tensor(weight, dtype=tensor.dtype.base_dtype, name='loss_weight') loss = tf.multiply(weight, tf.nn.l2_loss(tensor), name='value') return loss def lppool(inpOp, pnorm, kH, kW, dH, dW, padding, name): with tf.variable_scope(name): if pnorm == 2: pwr = tf.square(inpOp) else: pwr = tf.pow(inpOp, pnorm) subsamp = tf.nn.avg_pool(pwr, ksize=[1, kH, kW, 1], strides=[1, dH, dW, 1], padding=padding) subsamp_sum = tf.multiply(subsamp, kH*kW)
tensorflow.square
14,406
import tensorflow as tf def valid_inference(self,images): images=tf.cast(images,tf.float32)/255.0 l1 = tf.matmul(images, self.w1)+self.b1 l1=tf.nn.relu(l1)
tensorflow.matmul
14,407
import tensorflow as tf from evaluation import factory def write_summary(logs, summary_writer, current_step): """Write out summaries of current training step for the checkpoint.""" with tf.Graph().as_default(): summaries = [tf.Summary.Value(tag=tag, simple_value=value) for tag, value in logs.items()] tf_summary = tf.Summary(value=summaries) summary_writer.add_summary(tf_summary, current_step) class TpuExecutor(object):
tensorflow.Summary.Value
14,408
import tensorflow as tf tf.reverse_v2(scales_to_logits_reversed[MERGED_LOGITS_SCOPE], [2]), tf.shape(images)[1:3], scales_to_logits_reversed[MERGED_LOGITS_SCOPE].dtype) outputs_to_predictions[output].append( tf.expand_dims(tf.nn.softmax(logits_reversed), 4)) for output in sorted(outputs_to_predictions): predictions = outputs_to_predictions[output] # Compute average prediction across different scales and flipped images. predictions = tf.reduce_mean(tf.concat(predictions, 4), axis=4) outputs_to_predictions[output] = tf.argmax(predictions, 3, output_type=tf.dtypes.int32) outputs_to_predictions[output + PROB_SUFFIX] = tf.nn.softmax(predictions) return outputs_to_predictions def predict_labels(images, model_options): """Predicts segmentation labels. Args: images: A tensor of size [batch, height, width, channels]. model_options: A ModelOptions instance to configure models.
tensorflow.nn.softmax
14,409
import tensorflow as tf self.w_soft = tf.get_variable("w", [self.lstm_size, self.num_branches]) b_init = np.array([10.0, 10.0] + [0] * (self.num_branches - 2), dtype=np.float32) self.b_soft = tf.get_variable( "b", [1, self.num_branches], initializer=tf.constant_initializer(b_init)) b_soft_no_learn = np.array( [0.25, 0.25] + [-0.25] * (self.num_branches - 2), dtype=np.float32) b_soft_no_learn = np.reshape(b_soft_no_learn, [1, self.num_branches]) self.b_soft_no_learn = tf.constant(b_soft_no_learn, dtype=tf.float32) with tf.variable_scope("attention"): self.w_attn_1 = tf.get_variable("w_1", [self.lstm_size, self.lstm_size]) self.w_attn_2 = tf.get_variable("w_2", [self.lstm_size, self.lstm_size]) self.v_attn = tf.get_variable("v", [self.lstm_size, 1]) def _build_sampler(self, prev_c=None, prev_h=None, use_bias=False): """Build the sampler ops and the log_prob ops.""" print ("-" * 80) print ("Build controller sampler") anchors = tf.TensorArray( tf.float32, size=self.num_cells + 2, clear_after_read=False) anchors_w_1 = tf.TensorArray( tf.float32, size=self.num_cells + 2, clear_after_read=False) arc_seq = tf.TensorArray(tf.int32, size=self.num_cells * 4) if prev_c is None: assert prev_h is None, "prev_c and prev_h must both be None"
tensorflow.get_variable
14,410
import tensorflow as tf activation=modeling.get_activation(bert_config.hidden_act), kernel_initializer=modeling.create_initializer( bert_config.initializer_range)) input_tensor = modeling.layer_norm(input_tensor) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. output_bias = tf.get_variable( "output_bias", shape=[bert_config.vocab_size], initializer=tf.zeros_initializer()) logits = tf.matmul(input_tensor, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) label_ids = tf.reshape(label_ids, [-1]) one_hot_labels = tf.one_hot( label_ids, depth=bert_config.vocab_size, dtype=tf.float32) per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
tensorflow.zeros_initializer
14,411
import tensorflow as tf # and due to the fact that the rightmost boundary is essentially ignored. boundaries = tf.expand_dims(tf.cast(boundaries, tf.float32), 0) - 0.0001 bucket_indices = tf_utils.assign_buckets( tf.cast(x, tf.float32), remove_leftmost_boundary(boundaries)) bucket_vocab, counts = count_per_key(tf.strings.as_string(bucket_indices)) counts = tf_utils.reorder_histogram(bucket_vocab, counts, tf.size(boundaries) - 1) return counts, boundaries
tensorflow.strings.as_string
14,412
import tensorflow as tf cross_entropy (tf.Operation): Final layer of network. cross_entropy_grads (tf.Operation): Gradient computation. sess (tf.Session): Session used for training. variables (TensorFlowVariables): Extracted variables and methods to manipulate them. """ def __init__(self, shape): """Creates a LinearModel object.""" x = tf.placeholder(tf.float32, [None, shape[0]]) w = tf.Variable(tf.zeros(shape)) b = tf.Variable(tf.zeros(shape[1])) self.x = x self.w = w self.b = b y = tf.nn.softmax(tf.matmul(x, w) + b) y_ = tf.placeholder(tf.float32, [None, shape[1]]) self.y_ = y_
tensorflow.placeholder
14,413
import tensorflow as tf L = tf.batch_cholesky(tf.transpose(var, (2, 0, 1)) + jitter) V_shape = [tf.shape(L)[0], tf.shape(L)[1], num_samples] V = tf.random_normal(V_shape, dtype=L.dtype) samples = tf.expand_dims(tf.transpose(mu), -1) + tf.batch_matmul(L, V)
tensorflow.random_normal
14,414
import tensorflow as tf def test_randomly_select_one_point_per_segment(self): instance_labels = tf.constant([[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 2, 2, 2, 2, 2, 2], [1, 2, 2, 2, 2, 2, 2, 2], [0, 0, 0, 0, 2, 2, 2, 2], [0, 0, 0, 0, 2, 2, 2, 2]], dtype=tf.int32) instance_labels = tf.reshape(instance_labels, [-1]) (indices, masks_t) = isu.randomly_select_one_point_per_segment(instance_labels) masks = tf.transpose(masks_t) masks = tf.reshape(masks, [3, 5, 8]) expected_masks = self.get_instance_masks() selected_instances = tf.gather(instance_labels, indices) expected_selected_instances = tf.constant([0, 1, 2], dtype=tf.int32) self.assertAllEqual(selected_instances.numpy(), expected_selected_instances.numpy()) self.assertAllClose(masks.numpy(), expected_masks.numpy()) def test_inputs_Distances_to_centers(self): inputs = tf.random.uniform( [100, 8], minval=-10, maxval=10.0, dtype=tf.float32) centers = tf.random.uniform( [5, 8], minval=-10, maxval=10.0, dtype=tf.float32) distances1 = isu.inputs_distances_to_centers(inputs, centers) num_centers = tf.shape(centers)[0] inputs_reshaped = tf.tile(tf.expand_dims(inputs, axis=1),
tensorflow.gather
14,415
import tensorflow as tf def loop_analysis(element): x = tf.expand_dims(element[0], 0) x_coori = tf.expand_dims(element[1], 0) y = analysis_transform(x_coori,x) return tf.squeeze(y,axis=0) element = [x,x_coori] ys = tf.map_fn(loop_analysis, element, dtype=tf.float32, parallel_iterations=1, back_prop=False) print("Analysis Transform") def loop_hyper_encoder(y): y = tf.expand_dims(y, 0) z = hyper_encoder(y) return tf.squeeze(z,axis=0) zs = tf.map_fn(loop_hyper_encoder, ys, dtype=tf.float32, parallel_iterations=1, back_prop=False) print("Hyper Encoder") z_hats, _ = entropy_bottleneck(zs, False) print("Quantize hyperprior") def loop_hyper_deocder(z): z = tf.expand_dims(z, 0) loc, scale = hyper_decoder(z) return tf.squeeze(loc, [0]), tf.squeeze(scale, [0])
tensorflow.squeeze
14,416
import tensorflow as tf scaffold = tf.train.Scaffold(init_fn=train_helper.get_init_fn_for_scaffold(FLAGS))) def parse_comma_list(args): return [float(s.strip()) for s in args.split(',')] def main(_): # Using the Winograd non-fused algorithms provides a small performance boost. os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1' gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction) config = tf.ConfigProto(allow_soft_placement = True, log_device_placement = False, intra_op_parallelism_threads = FLAGS.num_cpu_threads, inter_op_parallelism_threads = FLAGS.num_cpu_threads, gpu_options = gpu_options) # Set up a RunConfig to only save checkpoints once per training cycle. run_config = tf.estimator.RunConfig().replace( save_checkpoints_secs=FLAGS.save_checkpoints_secs).replace( save_checkpoints_steps=None).replace( save_summary_steps=FLAGS.save_summary_steps).replace( keep_checkpoint_max=5).replace(
tensorflow.GPUOptions
14,417
import tensorflow as tf rand = tf.random_uniform(layer_shape[:-1] + [hparams.bottleneck_bits]) else: rand = tf.random_uniform(layer_shape[:-3] + [ 1, 1, hparams.bottleneck_bits]) d = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0 z = tf.layers.dense(d, final_filters, name="unbottleneck") return layer + z, 0.0 # Embed. x = tf.layers.dense( features["cur_target_frame"], filters, name="latent_embed", bias_initializer=tf.random_normal_initializer(stddev=0.01)) x = common_attention.add_timing_signal_nd(x) if hparams.full_latent_tower: for i in range(hparams.num_compress_steps): with tf.variable_scope("latent_downstride%d" % i): x = common_layers.make_even_size(x) if i < hparams.filter_double_steps: filters *= 2 x = common_attention.add_timing_signal_nd(x) x = tf.layers.conv2d(x, filters, kernel,
tensorflow.random_normal_initializer
14,418
from tensorflow.python.ops import control_flow_ops self.capped_d_grads = self._clip_grad_norms( d_optimizer.compute_gradients(self.d_losses[-1], t_vars['d_vars'])) self.capped_g_grads = self._clip_grad_norms( g_optimizer.compute_gradients(self.g_losses[-1], t_vars['g_vars'])) global_step = tf.get_variable( 'global_step', [], initializer=tf.constant_initializer(0), trainable=False) if self.gradient_multipliers is not None: with tf.name_scope('multiply_grads'): self.capped_d_grads = self._multiply_gradients(self.capped_d_grads, self.gradient_multipliers) apply_d_gradient_op = d_optimizer.apply_gradients(self.capped_d_grads, global_step=global_step) apply_g_gradient_op = g_optimizer.apply_gradients(self.capped_g_grads, global_step=global_step) self.train_op_d = control_flow_ops.with_dependencies([apply_d_gradient_op], self.d_losses[-1]) self.train_op_g = control_flow_ops.with_dependencies([apply_g_gradient_op], self.g_losses[-1])
tensorflow.python.ops.control_flow_ops.with_dependencies
14,419
import tensorflow as tf "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " "url.") tf.flags.DEFINE_string( "tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not "
tensorflow.flags.DEFINE_string
14,420
import tensorflow as tf
tensorflow.keras.layers.concatenate
14,421
import tensorflow as tf target_samples -= tf.reduce_mean(target_samples, 0) source_samples = tf.nn.l2_normalize(source_samples, 1) target_samples = tf.nn.l2_normalize(target_samples, 1) source_cov = tf.matmul(tf.transpose(source_samples), source_samples)
tensorflow.nn.l2_normalize
14,422
import tensorflow as tf "root", initializer=tf.constant_initializer(0.5)): cell = tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True) cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell] * 2, state_is_tuple=True) inp = tf.constant(0.5, shape=[2, 2, 2]) enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32) attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs])
tensorflow.constant
14,423
import tensorflow as tf assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length label_id = label_map[example.label] if ex_index < 5: tf.logging.info("*** Example ***") tf.logging.info("guid: %s" % (example.guid)) tf.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tensorflow.logging.info
14,424
import tensorflow as tf # summaries for TensorBoard visualisation validation_summary = tf.summary.merge([img_summary, acc_summary]) training_summary = tf.summary.merge([img_summary, loss_summary]) test_summary = tf.summary.merge([img_summary, acc_summary]) # saver for checkpoints saver = tf.train.Saver(tf.global_variables(), max_to_keep=1) with tf.Session() as sess: summary_writer = tf.summary.FileWriter(run_log_dir + '_train', sess.graph, flush_secs=5) summary_writer_validation = tf.summary.FileWriter(run_log_dir + '_validate', sess.graph, flush_secs=5) sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) # Training and validation for step in range(FLAGS.max_steps): # Training: Backpropagation using train set (trainImages, trainLabels) = cifar.getTrainBatch() (testImages, testLabels) = cifar.getTestBatch()
tensorflow.summary.FileWriter
14,425
import tensorflow as tf model_name: string, a model name used inside the model server. image_size: int, size of image, assuming image height and width. batch_sizes: list, a list of batch sizes to create different input requests. num_requests: int, number of requests per batch size. Raises: ValueError: if batch_sizes is not a valid integer list. """ if not isinstance(batch_sizes, list) or not batch_sizes: raise ValueError('batch sizes should be a valid non-empty list.') extra_assets_dir = os.path.join(savedmodel_dir, 'assets.extra') tf.gfile.MkDir(extra_assets_dir) with tf.python_io.TFRecordWriter( os.path.join(extra_assets_dir, 'tf_serving_warmup_requests')) as writer: for batch_size in batch_sizes: for _ in range(num_requests): request = predict_pb2.PredictRequest() image = np.uint8(np.random.rand(image_size, image_size, 3) * 255) request.inputs['input'].CopyFrom( tf.make_tensor_proto( [_encode_image(image)] * batch_size, shape=[batch_size])) request.model_spec.name = model_name
tensorflow.gfile.MkDir
14,426
from tensorflow.python.framework import ops is_continuous=True, is_reparameterized=False, parameters=parameters, graph_parents=[self._alpha, self._beta], name=ns) @staticmethod def _param_shapes(sample_shape): return dict( zip(("alpha", "beta"), ([ops.convert_to_tensor( sample_shape, dtype=dtypes.int32)] * 2))) @property def alpha(self): """Shape parameter.""" return self._alpha @property
tensorflow.python.framework.ops.convert_to_tensor
14,427
import tensorflow as tf elif kh == 14 and kw == 14: return 196.0 * 21.0 / 4096.0 else: rec = tf.cast(kw * kh, tf.float32) n_max = 7 + tf.math.ceil(tf.math.log(rec) / tf.math.log(2.)) ns = tf.range(0., n_max) ns_pow = tf.pow(2., ns) ks = tf.round(ns_pow / rec) diffs = tf.math.abs(ks / ns_pow - 1 / rec) n = tf.argmin(diffs) k = ks[n] scale = k / tf.pow(2., tf.cast(n, tf.float32))
tensorflow.pow
14,428
from tensorflow.python.framework import tensor_shape @ops.RegisterShape("DepthwiseConv2dNativeBackpropFilter") def _DepthwiseConv2dNativeBackpropFilterShape(op): """Shape function for the DepthwiseConv2dNativeBackpropFilter op.""" filter_shape = tensor_util.constant_value(op.inputs[1]) if filter_shape is not None: return [tensor_shape.TensorShape(filter_shape.tolist())] else: return [tensor_shape.unknown_shape(ndims=4)] @ops.RegisterShape("DepthwiseConv2dNativeBackpropInput") def _DepthwiseConv2dNativeBackpropInputShape(op): """Shape function for the DepthwiseConv2dNativeBackpropInput op.""" input_shape = tensor_util.constant_value(op.inputs[0]) if input_shape is not None:
tensorflow.python.framework.tensor_shape.unknown_shape
14,429
import tensorflow as tf def metric_fn(per_example_loss, label_ids, logits, is_real_example): """Compute Matthew's correlations for STS-B.""" predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) # https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
tensorflow.argmax
14,430
import tensorflow as tf def max_pool(img, k): return tf.nn.max_pool(img, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME') # Consider stride size when using xavier for fp network def get_xavier_weights(filter_shape, poolsize=(2, 2), name=None): fan_in = np.prod(filter_shape[1:]) fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) // np.prod(poolsize)) low = -4*np.sqrt(6.0/(fan_in + fan_out)) # use 4 for sigmoid, 1 for tanh activation high = 4*np.sqrt(6.0/(fan_in + fan_out)) weights = np.random.uniform(low=low, high=high, size=filter_shape) return safe_get(name, filter_shape, initializer=tf.constant_initializer(weights)) def get_he_weights(filter_shape, name=None): fan_in = np.prod(filter_shape[1:]) stddev = np.sqrt(2.6/fan_in) weights = stddev * np.random.randn(filter_shape[0], filter_shape[1], filter_shape[2], filter_shape[3]) return safe_get(name, filter_shape, initializer=tf.constant_initializer(weights))
tensorflow.constant_initializer
14,431
import tensorflow as tf images = data['data'] num_images = images.shape[0] images = images.reshape((num_images, 3, 32, 32)) labels = data['labels'] with tf.Graph().as_default(): image_placeholder = tf.placeholder(dtype=tf.uint8) encoded_image = tf.image.encode_png(image_placeholder) with tf.Session('') as sess: for j in range(num_images): sys.stdout.write('\r>> Reading file [%s] image %d' % ( filename, offset + 1)) sys.stdout.flush()
tensorflow.image.encode_png
14,432
import tensorflow as tf f = tf.sigmoid(util.projection(tf.concat([top_span_emb, attended_span_emb], 1), util.shape(top_span_emb, -1))) # [k, emb] top_span_emb = f * attended_span_emb + (1 - f) * top_span_emb # [k, emb] top_antecedent_scores = tf.concat([dummy_scores, top_antecedent_scores], 1) # [k, c + 1] top_antecedent_cluster_ids = tf.gather(top_span_cluster_ids, top_antecedents) # [k, c] top_antecedent_cluster_ids += tf.to_int32(tf.log(tf.to_float(top_antecedents_mask))) # [k, c] same_cluster_indicator = tf.equal(top_antecedent_cluster_ids, tf.expand_dims(top_span_cluster_ids, 1)) # [k, c] non_dummy_indicator = tf.expand_dims(top_span_cluster_ids > 0, 1) # [k, 1] pairwise_labels = tf.logical_and(same_cluster_indicator, non_dummy_indicator) # [k, c] dummy_labels = tf.logical_not(tf.reduce_any(pairwise_labels, 1, keepdims=True)) # [k, 1] top_antecedent_labels = tf.concat([dummy_labels, pairwise_labels], 1) # [k, c + 1] loss = self.softmax_loss(top_antecedent_scores, top_antecedent_labels) # [k] loss = tf.reduce_sum(loss) # [] return [candidate_starts, candidate_ends, candidate_mention_scores, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores], loss def get_span_emb(self, head_emb, context_outputs, span_starts, span_ends): span_emb_list = []
tensorflow.reduce_any
14,433
import tensorflow as tf color_name = items[0] chars = tf.one_hot(tf.decode_raw(color_name, tf.uint8), depth=256) # The sequence length is needed by our RNN. length = tf.cast(tf.shape(chars)[0], dtype=tf.int64) return rgb, chars, length
tensorflow.shape
14,434
import tensorflow as tf else: dataset_path = os.path.join(dataset_path, 'dev.json') # Opening with GFile allows to use remotely stored files, e.g. # in a gs bucket. dataset_handle = tf.io.gfile.GFile(dataset_path, 'r') dataset = json.load(dataset_handle) def mathqa_yield_examples(generator=None):
tensorflow.io.gfile.GFile
14,435
import tensorflow as tf with self.session(use_gpu=False): tf.set_random_seed(93820985) p = self._testParams() mdl = p.Instantiate() mdl.FPropDefaultTheta() mdl.BProp() tf.global_variables_initializer().run() test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval()) mdl.train_op.run() def testBPropSmoothDecay(self): with self.session(use_gpu=False): tf.set_random_seed(93820985) p = self._testParams() p.train.lr_schedule = ( schedule.ContinuousLearningRateSchedule.Params().Set( start_step=350000, half_life_steps=45000)) mdl = p.Instantiate() mdl.FPropDefaultTheta() mdl.BProp() tf.global_variables_initializer().run() test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval()) mdl.train_op.run()
tensorflow.set_random_seed
14,436
import tensorflow as tf reconstructed_outputs, reconstructed_weights, _, _, _, _, _ = attention_decoder( attention_states=[states], initial_state=states[:,-1,:], feed_previous=feed_previous, decoder_inputs=targets[1][:, :-1], encoder_input_length=target_length, decoder=decoders[1], training=training, encoders=decoders[:1] ) target_weights = get_weights(targets[1][:, 1:], utils.EOS_ID, include_first_eos=True) xent_loss += reconstruction_weight * sequence_loss(logits=reconstructed_outputs, targets=targets[1][:, 1:], weights=target_weights) max_src_len = tf.shape(reconstructed_weights)[1] batch_size = tf.shape(reconstructed_weights)[0] attn_loss = tf.matmul(reconstructed_weights, attention_weights) - tf.eye(max_src_len) src_mask = tf.sequence_mask(encoder_input_length[0], maxlen=max_src_len, dtype=tf.float32) src_mask = tf.einsum('ij,ik->ijk', src_mask, src_mask) attn_loss *= tf.to_float(src_mask) # don't take padding words into account attn_loss = tf.norm(attn_loss) / tf.to_float(batch_size)
tensorflow.shape
14,437
import tensorflow as tf x_image = tf.cond(train, lambda: tf.map_fn(tf.image.random_flip_left_right, x_image), lambda: x_image) x_image = tf.cond(train, lambda: tf.map_fn(lambda x: tf.image.random_brightness(x, 0.5), x_image), lambda: x_image) img_summary = tf.summary.image('Input_images', x_image) # First convolutional layer - maps one image to 32 feature maps. with tf.variable_scope('Conv_1'): conv1 = tf.layers.conv2d( inputs=x_image, filters=32, kernel_size=[5,5], padding='same', use_bias=False,
tensorflow.variable_scope
14,438
import tensorflow as tf validloss = [] itr = 0 saver = tf.train.Saver()
tensorflow.train.Saver
14,439
import tensorflow as tf d_loss = real_loss + fake_loss if mode == "min_fake": g_loss = - fake_loss elif mode == "max_real": g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits( logits=fake_logits, labels=tf.ones_like(fake_logits))) else: raise ValueError("Unknown mode: %s. Only 'min_fake' and 'max_real' " "are allowed.") return g_loss, d_loss
tensorflow.ones_like
14,440
import tensorflow as tf tf.logging.set_verbosity(tf.logging.INFO) bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) if FLAGS.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length %d because the BERT model " "was only trained up to sequence length %d" % (FLAGS.max_seq_length, bert_config.max_position_embeddings)) tf.gfile.MakeDirs(FLAGS.output_dir) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, tpu_config=tf.contrib.tpu.TPUConfig( num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) model_fn = model_fn_builder(
tensorflow.contrib.cluster_resolver.TPUClusterResolver
14,441
import tensorflow as tf assert_op = tf.Assert(tf.is_finite(corr_loss), [corr_loss]) with tf.control_dependencies([assert_op]): tag = 'Correlation Loss'
tensorflow.control_dependencies
14,442
import tensorflow as tf def augment(features, targets): features['image'] = _cifar_augment_image(features['image']) return features, targets def flatten_image(features, targets): """Flatten the image.""" img = features['image'] flat = tf.cast(tf.reshape(img, [-1]), tf.int64) tgt = tf.expand_dims(targets, axis=0) flat_with_target = tf.concat([flat, tgt], axis=0) new_features = {} new_features['image'] = flat_with_target predict_image_weight = predict_image_train_weight if training else 0.0 mask_begin = tf.ones_like(flat) mask_begin = tf.cast(mask_begin, tf.float32) * predict_image_weight mask_end = tf.cast(tf.ones_like(tgt), tf.float32) new_features['mask'] = tf.concat([mask_begin, mask_end], axis=0) return new_features, flat_with_target if training: dataset = dataset.map(augment) dataset = dataset.map(flatten_image) return dataset @gin.configurable(module='trax.data', denylist=['dataset', 'training'])
tensorflow.ones_like
14,443
import tensorflow as tf pred_flat = tf.reshape(pred, [-1]) tgt_flat = tf.reshape(tgt, [-1]) batch = tf.stack([pred_flat, tgt_flat], 1) num_sam = tools.shape(batch)[0] index = tf.range(num_sam) divider = tf.constant(resample, dtype=tf.float32) def sample_compute(cur_loss, i): batch1 = tf.gather(batch, tf.random.shuffle(index)) batch2 = tf.gather(batch, tf.random.shuffle(index)) pred1 = tf.slice(batch1, [0, 0], [num_sam, 1]) pred2 = tf.slice(batch2, [0, 0], [num_sam, 1]) tgt1 = tf.slice(batch1, [0, 1], [num_sam, 1]) tgt2 = tf.slice(batch2, [0, 1], [num_sam, 1]) loss = cur_loss + compute_contra_loss(pred1, pred2, tgt1, tgt2) print(loss) return (loss, i + 1)
tensorflow.random.shuffle
14,444
import tensorflow as tf If the mapping does not exist or `vocab_filename` is not present within it, we will default to sanitizing `vocab_filename` and searching for files matching it within the assets directory. In either case, if the constructed path does not point to an existing file within the assets subdirectory, we will return a None. Args: vocab_filename: The vocabulary name to lookup. """ mapping_path = os.path.join(self._transformed_metadata_dir, self.ASSET_MAP) mapping = {} if tf.io.gfile.exists(mapping_path): with tf.io.gfile.GFile(mapping_path) as f: mapping = json.loads(f.read()) if vocab_filename in mapping: vocab_path = os.path.join(self.transform_savedmodel_dir, tf.saved_model.ASSETS_DIRECTORY, mapping[vocab_filename]) if tf.io.gfile.exists(vocab_path): return vocab_path prefix = os.path.join(self.transform_savedmodel_dir, tf.saved_model.ASSETS_DIRECTORY, sanitized_vocab_filename(filename=vocab_filename)) files = tf.io.gfile.glob(prefix) + tf.io.gfile.glob( '{}.tfrecord.gz'.format(prefix))
tensorflow.io.gfile.GFile
14,445
import tensorflow as tf drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": tf.FixedLenFeature([seq_length], tf.int64), "input_mask": tf.FixedLenFeature([seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), "label_ids": tf.FixedLenFeature([], tf.int64),
tensorflow.FixedLenFeature
14,446
import tensorflow as tf w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1)) b = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) v = tf.Variable(tf.random_normal([attention_size], stddev=0.1)) with tf.name_scope('v'): # Applying fully connected layer with non-linear activation to each of the B*T timestamps; # the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size tmp1 = tf.tensordot(facts, w1, axes=1) tmp2 = tf.tensordot(query, w2, axes=1) tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]]) tmp = tf.tanh((tmp1 + tmp2) + b) # For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape key_masks = mask # [B, 1, T] # key_masks = tf.expand_dims(mask, 1) # [B, 1, T] paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1) v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T] alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape
tensorflow.tanh
14,447
import tensorflow as tf # sess.close() identity_matrix = tf.diag([1.0, 3.0, 1.0]) A = tf.truncated_normal([2, 3]) B = tf.fill([2, 3], 5.0) C = tf.random_uniform([3, 2], maxval=100) D = tf.convert_to_tensor(np.array([[1., 2., 3.], [-3., -7., -1.], [0., 5., -2.]])) sess = tf.Session() # sess.run(tf.global_variables_initializer()) # print(sess.run(tf.random_normal(mean=10, shape=[10])))
tensorflow.random_uniform
14,448
import tensorflow as tf return T.ones(shape, dtype) else: assert K.backend() == 'tensorflow' import tensorflow as tf return tf.ones(shape, dtype) def zeros(shape, dtype=K.floatx()):
tensorflow.ones
14,449
import tensorflow as tf def get_masked_lm_output(bert_config, input_tensor, output_weights, positions, label_ids, label_weights): """Get loss and log probs for the masked LM.""" input_tensor = gather_indexes(input_tensor, positions) with tf.variable_scope("cls/predictions"): # We apply one more non-linear transformation before the output layer. # This matrix is not used after pre-training. with tf.variable_scope("transform"): input_tensor = tf.layers.dense(
tensorflow.variable_scope
14,450
import tensorflow as tf assert(data_format == 'NDHWC') self.w = tf.get_variable('w', [k_t, k_h, k_w, input_dim, output_dim], initializer=tf.truncated_normal_initializer(stddev=stddev)) self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0)) self.strides = [d_t,d_h,d_w] def __call__(self,input_var,name=None,w=None,b=None,**kwargs) :
tensorflow.constant_initializer
14,451
import tensorflow as tf logits = tf.expand_dims(logits, axis=1) value = tf.layers.dense(x, self.distributional_value_size) return {"target_policy": logits, "target_value": value} @registry.register_model class FeedForwardCnnSmallCategoricalPolicyNew(PolicyBase): """Small cnn network with categorical output.""" def body(self, features): observations = features["inputs"] x = tf.transpose(observations, [0, 2, 3, 1, 4]) x_shape = common_layers.shape_list(x) x = tf.reshape(x, x_shape[:-2] + [-1]) dropout = getattr(self.hparams, "dropout_ppo", 0.0) with tf.variable_scope("feed_forward_cnn_small"): x = tf.cast(x, tf.float32) / 255.0 x = tf.nn.dropout(x, rate=dropout) x = tf.layers.conv2d( x, 32, (4, 4), strides=(2, 2), name="conv1", activation=common_layers.belu, padding="SAME") x = tf.nn.dropout(x, rate=dropout) x = tf.layers.conv2d( x, 64, (4, 4), strides=(2, 2), name="conv2", activation=common_layers.belu, padding="SAME") x = tf.nn.dropout(x, rate=dropout) x = tf.layers.conv2d( x, 128, (4, 4), strides=(2, 2), name="conv3", activation=common_layers.belu, padding="SAME")
tensorflow.variable_scope
14,452
import tensorflow as tf mse_loss_list.append(0.5 * tf.losses.mean_squared_error(targets_list[pred_ind], pred_outputs[pred_ind], weights=1.0 / tf.cast(cur_batch_size, tf.float32), scope='loss_{}'.format(pred_ind), loss_collection=None,#tf.GraphKeys.LOSSES, # mean all elements of all pixels in all batch reduction=tf.losses.Reduction.MEAN))# SUM, SUM_OVER_BATCH_SIZE, default mean by all elements temp_loss = tf.reduce_mean(tf.reshape(tf.losses.mean_squared_error(targets_list[-1], pred_outputs[-1], weights=1.0, loss_collection=None, reduction=tf.losses.Reduction.NONE), [cur_batch_size, config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')], -1]), axis=-1) num_topk = config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')] // 2 gather_col = tf.nn.top_k(temp_loss, k=num_topk, sorted=True)[1] gather_row = tf.reshape(tf.tile(tf.reshape(tf.range(cur_batch_size), [-1, 1]), [1, num_topk]), [-1, 1]) gather_indcies = tf.stop_gradient(tf.stack([gather_row, tf.reshape(gather_col, [-1, 1])], axis=-1))
tensorflow.losses.mean_squared_error
14,453
import tensorflow as tf if FLAGS.pretrained_model_path is not None: variable_restore_op = slim.assign_from_checkpoint_fn(FLAGS.pretrained_model_path, slim.get_trainable_variables(), ignore_missing_vars=True) config = tf.ConfigProto() custom_op = config.graph_options.rewrite_options.custom_optimizers.add() custom_op.name = "NpuOptimizer" custom_op.parameter_map["use_off_line"].b = True # 在昇腾AI处理器执行训练 config.graph_options.rewrite_options.remapping = RewriterConfig.OFF # 关闭remap开关 if FLAGS.allow_mix_precision: custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision") if FLAGS.auto_tune: custom_op.parameter_map["auto_tune_mode"].s = tf.compat.as_bytes("RL,GA") with tf.Session(config=config) as sess: if FLAGS.restore: print('continue training from previous checkpoint') ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_path) saver.restore(sess, ckpt) else: sess.run(init)
tensorflow.compat.as_bytes
14,454
import tensorflow as tf # Test when num_decoder_symbols is provided, the size of decoder output # is num_decoder_symbols. with tf.variable_scope("decoder_symbols_seq2seq"): dec, mem = tf.nn.seq2seq.embedding_tied_rnn_seq2seq( enc_inp, dec_inp, cell, num_symbols=5, num_decoder_symbols=3, embedding_size=2) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 3), res[0].shape) # Test externally provided output projection. w = tf.get_variable("proj_w", [2, 5]) b = tf.get_variable("proj_b", [5]) with tf.variable_scope("proj_seq2seq"): dec, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq( enc_inp, dec_inp, cell, num_symbols=5, embedding_size=2, output_projection=(w, b)) sess.run([tf.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 2), res[0].shape) # Test that previous-feeding model ignores inputs after the first. dec_inp2 = [tf.constant(0, tf.int32, shape=[2])] * 3 with tf.variable_scope("other"): d3, _ = tf.nn.seq2seq.embedding_tied_rnn_seq2seq( enc_inp, dec_inp2, cell, num_symbols=5, embedding_size=2, feed_previous=tf.constant(True))
tensorflow.variable_scope
14,455
import tensorflow as tf if decoder.use_previous_word: projection_input.insert(1, input_) # for back-compatibility output_ = tf.concat(projection_input, axis=1) if decoder.pred_deep_layer:
tensorflow.concat
14,456
from tensorflow.contrib.framework import deprecated result[name] = metric(predictions, labels) return result @deprecated( "2016-11-12", "This file will be removed after the deprecation date." "Please switch to " "third_party/tensorflow/contrib/learn/python/learn/estimators/head.py") def get_default_binary_metrics_for_eval(thresholds):
tensorflow.contrib.framework.deprecated
14,457
import tensorflow as tf top_span_ends = tf.gather(candidate_ends, top_span_indices) # [k] top_span_emb = tf.gather(candidate_span_emb, top_span_indices) # [k, emb] top_span_cluster_ids = tf.gather(candidate_cluster_ids, top_span_indices) # [k] top_span_mention_scores = tf.gather(candidate_mention_scores, top_span_indices) # [k] top_span_sentence_indices = tf.gather(candidate_sentence_indices, top_span_indices) # [k] top_span_speaker_ids = tf.gather(speaker_ids, top_span_starts) # [k] c = tf.minimum(self.config["max_top_antecedents"], k) if self.config["coarse_to_fine"]:
tensorflow.gather
14,458
import tensorflow as tf scale: The amount of scaling applied to the input. Returns: Scaled dimension. """ if isinstance(dim, tf.Tensor): return tf.cast((tf.to_float(dim) - 1.0) * scale + 1.0, dtype=tf.int32) else: return int((float(dim) - 1.0) * scale + 1.0) def multi_scale_logits(images,
tensorflow.to_float
14,459
from tensorflow.python.ops import array_ops `weights` is not `None` and its shape doesn't match `predictions`, or if either `metrics_collections` or `updates_collections` are not a list or tuple. """ predictions, labels = tensor_util.remove_squeezable_dimensions( predictions, labels) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) predictions, normalizer = tensor_util.remove_squeezable_dimensions( predictions, normalizer) predictions.get_shape().assert_is_compatible_with(normalizer.get_shape()) relative_errors = math_ops.select( math_ops.equal(normalizer, 0.0), array_ops.zeros_like(labels), math_ops.div(math_ops.abs(labels - predictions), normalizer)) return streaming_mean(relative_errors, weights, metrics_collections, updates_collections, name or 'mean_relative_error') def streaming_mean_squared_error(predictions, labels, weights=None, metrics_collections=None, updates_collections=None, name=None): """Computes the mean squared error between the labels and predictions. The `streaming_mean_squared_error` function creates two local variables,
tensorflow.python.ops.array_ops.zeros_like
14,460
from tensorflow.python.framework import ops def _hard_label_shape(op): output_shape = op.inputs[0].get_shape() return [output_shape] @ops.RegisterGradient("Hardlabel") def _hard_label_grad(op, grad): bottom_prob = op.inputs[0] bottom_gt = op.inputs[1]
tensorflow.python.framework.ops.RegisterGradient
14,461
import tensorflow as tf pct = tf.math.count_nonzero(loss, dtype=tf.float32) / tf.size(loss, out_type=tf.float32) p = tf.cond(tf.random_uniform((), dtype=tf.float32) < 1e-4, lambda: tf.print('csrt acc ', [pct]), lambda: tf.no_op()) with tf.control_dependencies([p]): return tf.reduce_mean(loss) loss = tf.map_fn(fn=lambda inp: sample_compute(inp), elems=tf.range(resample), dtype=tf.float32, parallel_iterations=32) final_loss = tf.reduce_mean(loss) return final_loss def contra_traj_lossV1(pred, tgt, temp=10.0):
tensorflow.range
14,462
import tensorflow as tf def log_sum_exp(x): """numerically stable log_sum_exp implementation that prevents overflow.""" axis = len(x.get_shape()) - 1 m = tf.reduce_max(x, axis) m2 = tf.reduce_max(x, axis, keep_dims=True) return m + tf.log(tf.reduce_sum(tf.exp(x - m2), axis)) def log_prob_from_logits(x): """numerically stable log_softmax implementation that prevents overflow."""
tensorflow.reduce_max
14,463
from tensorflow.contrib import framework as contrib_framework return eval_results def _infer_model(self, x, batch_size=None, axis=None, proba=False): # Converts inputs into tf.DataFrame / tf.Series. batch_size = -1 if batch_size is None else batch_size input_fn, feed_fn = _get_predict_input_fn(x, batch_size) checkpoint_path = saver.latest_checkpoint(self._model_dir) with ops.Graph().as_default() as g: random_seed.set_random_seed(self._config.tf_random_seed) contrib_framework.create_global_step(g) features, _ = input_fn() feed_dict = feed_fn() if feed_fn is not None else None predictions = self._get_predict_ops(features) if not isinstance(predictions, dict): predictions = {'predictions': predictions} # TODO(ipolosukhin): Support batching return infer(checkpoint_path, predictions, feed_dict=feed_dict)
tensorflow.contrib.framework.create_global_step
14,464
import tensorflow as tf objectives.append((Objective(name, contra_loss, min, include, exclude))) elif name == 'reward' and config.r_loss == 'l2': pred = heads[name](features) l2_loss = tf.compat.v1.losses.mean_squared_error(target[name], pred) # l2_loss = tf.nn.l2_loss(pred - target[name]) objectives.append((Objective(name, l2_loss, min, include, exclude)))
tensorflow.compat.v1.losses.mean_squared_error
14,465
import tensorflow as tf else: assert tf.get_variable_scope().reuse is False """U-Net Generator""" def lrelu(x, alpha,name='lrelu'): with tf.variable_scope(name): return tf.nn.relu(x) - alpha * tf.nn.relu(-x) def instance_norm(x,name='instance_norm'): with tf.variable_scope(name): if reuse: tf.get_variable_scope().reuse_variables() else: assert tf.get_variable_scope().reuse is False epsilon = 1e-5 mean, var = tf.nn.moments(x, [1, 2], keep_dims=True) scale = tf.get_variable('scale',[x.get_shape()[-1]], initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02)) offset = tf.get_variable('offset',[x.get_shape()[-1]],initializer=tf.constant_initializer(0.0)) out = scale*tf.div(x-mean, tf.sqrt(var+epsilon)) + offset return out
tensorflow.get_variable_scope
14,466
import tensorflow as tf self.vf_eval, _ = self.build_cnet(self.state, 'vf', reuse=True) self.sample_action = tf.squeeze(pi_eval.sample(1), axis=0) self.eval_action = pi_eval.mode() self.global_step = tf.train.get_or_create_global_step() self.saver = tf.train.Saver() # Loss functions and training epsilon_decay = tf.train.polynomial_decay(self.EPSILON, self.global_step, self.EPS_LEN, 0.1, power=0) ratio = tf.maximum(pi.prob(batch['actions']), 1e-6) / tf.maximum(pi_old.prob(batch['actions']), 1e-6) ratio = tf.clip_by_value(ratio, 0, 10) surr1 = batch['advantage'] * ratio surr2 = batch['advantage'] * tf.clip_by_value(ratio, 1 - epsilon_decay, 1 + epsilon_decay) loss_pg = - 2.0 * tf.reduce_mean(tf.minimum(surr1, surr2)) loss_vf = 0.5 * tf.reduce_mean(tf.square(batch['rewards'] - self.vf)) loss_entropy = - 0.01 * tf.reduce_mean(pi.entropy()) loss = loss_pg + loss_vf + loss_entropy opt = tf.train.AdamOptimizer(self.LR) self.train_op = opt.minimize(loss, global_step=self.global_step, var_list=pi_params + vf_params) self.pi_new_params = [oldp.assign(p) for p, oldp in zip(pi_params, pi_old_params)] self.vf_new_params = [oldp.assign(p) for p, oldp in zip(vf_params, vf_old_params)] self.sess.run(tf.global_variables_initializer()) # Tensorboard if summary_dir is not None: self.writer = tf.summary.FileWriter(summary_dir)
tensorflow.square
14,467
import tensorflow as tf def body(self, features): observations = features["inputs"] flat_x = tf.layers.flatten(observations) with tf.variable_scope("dense_bitwise"): flat_x = discretization.int_to_bit_embed(flat_x, 8, 32)
tensorflow.variable_scope
14,468
import tensorflow as tf if not forward_only: lstm_cell = tf.nn.rnn_cell.DropoutWrapper(cell=lstm_cell, output_keep_prob=self.dropout_output) # lstm_cell = tf.nn.rnn_cell.MultiRNNCell(cells=[lstm_cell] * 4, state_is_tuple=True) if not forward_only: embed_inputs = tf.nn.dropout(embed_inputs, keep_prob=self.dropout_input) rnn_outputs, output_states = tf.nn.dynamic_rnn( cell=lstm_cell, inputs=embed_inputs, dtype=tf.float32, sequence_length=self.seq_len, ) ## (batch_size, seq_len, num_hidden)
tensorflow.nn.dynamic_rnn
14,469
import tensorflow as tf def get_stn(image): stn = (LinearWrap(image) .AvgPooling('downsample', 2) .Conv2D('conv0', 20, 5, padding='VALID') .MaxPooling('pool0', 2) .Conv2D('conv1', 20, 5, padding='VALID') .FullyConnected('fc1', out_dim=32) .FullyConnected('fct', out_dim=6, nl=tf.identity, W_init=tf.constant_initializer(), b_init=tf.constant_initializer([1, 0, HALF_DIFF, 0, 1, HALF_DIFF]))()) # output 6 parameters for affine transformation stn = tf.reshape(stn, [-1, 2, 3], name='affine') # bx2x3 stn = tf.reshape(tf.transpose(stn, [2, 0, 1]), [3, -1]) # 3 x (bx2) coor = tf.reshape(tf.matmul(xys, stn), [WARP_TARGET_SIZE, WARP_TARGET_SIZE, -1, 2]) coor = tf.transpose(coor, [2, 0, 1, 3], 'sampled_coords') # b h w 2 sampled = ImageSample('warp', [image, coor], borderMode='constant') return sampled with argscope([Conv2D, FullyConnected], nl=tf.nn.relu): with tf.variable_scope('STN1'): sampled1 = get_stn(image) with tf.variable_scope('STN2'): sampled2 = get_stn(image) # For visualization in tensorboard
tensorflow.matmul
14,470
import tensorflow as tf decayed_learning_rate = (max_lr - learning_rate) * (floor(global_step / step_size) - global_step / step_size) + learning_rate """ with tf.name_scope(name): learning_rate = tf.cast(learning_rate, dtype=tf.float32) global_step = tf.cast(global_step, dtype=tf.float32) step_size = tf.cast(step_size, dtype=tf.float32) max_lr = tf.cast(max_lr, dtype=tf.float32) if mode == 'tri': periodic_comp = tf.mod((global_step + step_size / 4) / step_size, 1) first_factor = tf.abs(periodic_comp - 0.5)
tensorflow.cast
14,471
import tensorflow as tf 'bboxes_predict': tf.reshape(bboxes_pred, [-1, 4]) } if mode == tf.estimator.ModeKeys.PREDICT: return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions) # Calculate loss, which includes softmax cross entropy and L2 regularization. cross_entropy = tf.cond(n_positives > 0., lambda: tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred), lambda: 0.) #cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=glabels, logits=cls_pred) # Create a tensor named cross_entropy for logging purposes. tf.identity(cross_entropy, name='cross_entropy_loss') tf.summary.scalar('cross_entropy_loss', cross_entropy) loc_loss = tf.cond(n_positives > 0., lambda: modified_smooth_l1(location_pred, tf.stop_gradient(gtargets), sigma=1.), lambda: tf.zeros_like(location_pred)) #loc_loss = modified_smooth_l1(location_pred, tf.stop_gradient(gtargets)) loc_loss = tf.reduce_mean(tf.reduce_sum(loc_loss, axis=-1)) loc_loss = tf.identity(loc_loss, name='location_loss') tf.summary.scalar('location_loss', loc_loss) tf.losses.add_loss(loc_loss)
tensorflow.identity
14,472
from tensorflow.python.ops import control_flow_ops t_vars['g_vars'], self.g_losses[-1], g_optimizer, gradient_noise_scale=0.0) else: self.capped_d_grads = self._clip_grad_norms( d_optimizer.compute_gradients(self.d_losses[-1], t_vars['d_vars'])) self.capped_g_grads = self._clip_grad_norms( g_optimizer.compute_gradients(self.g_losses[-1], t_vars['g_vars'])) global_step = tf.get_variable( 'global_step', [], initializer=tf.constant_initializer(0), trainable=False) if self.gradient_multipliers is not None: with tf.name_scope('multiply_grads'): self.capped_d_grads = self._multiply_gradients(self.capped_d_grads, self.gradient_multipliers) apply_d_gradient_op = d_optimizer.apply_gradients(self.capped_d_grads, global_step=global_step) apply_g_gradient_op = g_optimizer.apply_gradients(self.capped_g_grads, global_step=global_step) self.train_op_d = control_flow_ops.with_dependencies([apply_d_gradient_op], self.d_losses[-1]) self.train_op_g = control_flow_ops.with_dependencies([apply_g_gradient_op], self.g_losses[-1])
tensorflow.python.ops.control_flow_ops.with_dependencies
14,473
import tensorflow as tf learning_rate=learning_rate, clip_gradients=params.clip_grad_norm or None, optimizer=opt, colocate_gradients_with_ops=True ) zero_op = tf.no_op("zero_op") collect_op = tf.no_op("collect_op") else: grads_and_vars = opt.compute_gradients( loss, colocate_gradients_with_ops=True) gradients = [item[0] for item in grads_and_vars] variables = [item[1] for item in grads_and_vars]
tensorflow.no_op
14,474
import tensorflow as tf output_weight = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02) ) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer() ) with tf.variable_scope("loss"): if is_training: output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) output_layer = tf.reshape(output_layer, [-1, hidden_size]) logits = tf.matmul(output_layer, output_weight, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) logits = tf.reshape(logits, [-1, FLAGS.max_seq_length, 11]) log_probs = tf.nn.log_softmax(logits, axis=-1) # labels = tf.cast(labels,dtype=tf.float32) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
tensorflow.nn.dropout
14,475
from tensorflow.python.ops import math_ops false_positives = _create_local('false_positives', shape=[num_thresholds]) is_true_positive = math_ops.to_float( math_ops.logical_and(label_is_pos, pred_is_pos)) is_false_negative = math_ops.to_float( math_ops.logical_and(label_is_pos, pred_is_neg)) is_false_positive = math_ops.to_float( math_ops.logical_and(label_is_neg, pred_is_pos)) is_true_negative = math_ops.to_float( math_ops.logical_and(label_is_neg, pred_is_neg)) if weights is not None: weights = math_ops.to_float(weights) weights_tiled = array_ops.tile(array_ops.reshape( _broadcast_weights(weights, predictions), [1, -1]), [num_thresholds, 1]) thresh_tiled.get_shape().assert_is_compatible_with( weights_tiled.get_shape()) is_true_positive *= weights_tiled is_false_negative *= weights_tiled is_false_positive *= weights_tiled is_true_negative *= weights_tiled true_positives_update_op = state_ops.assign_add( true_positives, math_ops.reduce_sum(is_true_positive, 1))
tensorflow.python.ops.math_ops.to_float
14,476
import tensorflow as tf pad a vector with a zero row and gather with input inds """ if pad is None: pad = tf.expand_dims(tf.zeros_like(vecs[0]), 0) else: pad = tf.expand_dims(pad, 0) vecs_padded = tf.concat(0, [vecs, pad]) # flatten mask and edges vecs_gathered = tf.gather(vecs_padded, mask_inds) return vecs_gathered def padded_segment_reduce(vecs, segment_inds, num_segments, reduction_mode):
tensorflow.concat
14,477
import tensorflow as tf x = tfd.Normal(loc=0., scale=1., name="x") x_duplicate = tfd.Normal(loc=0., scale=1., name="x") with tf.name_scope("y") as name: y = tfd.Bernoulli(logits=0., name=name)
tensorflow.name_scope
14,478
import tensorflow as tf initial = tf.constant(0.1, shape=shape, name='Bias') return tf.Variable(initial)
tensorflow.Variable
14,479
from tensorflow.python.ops import math_ops predictions, labels, weights=weights, name='covariance') var_predictions, update_var_predictions = streaming_covariance( predictions, predictions, weights=weights, name='variance_predictions') var_labels, update_var_labels = streaming_covariance( labels, labels, weights=weights, name='variance_labels') pearson_r = _safe_div( cov, math_ops.mul(math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)), 'pearson_r') with ops.control_dependencies( [update_cov, update_var_predictions, update_var_labels]): update_op = _safe_div(update_cov, math_ops.mul( math_ops.sqrt(update_var_predictions), math_ops.sqrt(update_var_labels)), 'update_op') if metrics_collections: ops.add_to_collections(metrics_collections, pearson_r) if updates_collections: ops.add_to_collections(updates_collections, update_op) return pearson_r, update_op # TODO(nsilberman): add a 'normalized' flag so that the user can request # normalization if the inputs are not normalized. def streaming_mean_cosine_distance(predictions, labels, dim, weights=None,
tensorflow.python.ops.math_ops.sqrt
14,480
import tensorflow as tf self.example_weights = tf.placeholder(tf.float32, [batch_size], name='example_weights') # get one 'word' embedding for the full tweet tweet_embedding = c2v.GetEmbeddings(self.x)[:,1,:] logits = tf.nn.xw_plus_b(tweet_embedding, hidden, bias) self.probs = tf.nn.softmax(logits) self._xent = tf.nn.softmax_cross_entropy_with_logits(logits, self.y) self.cost = tf.reduce_mean(self.example_weights * self._xent) class WordLevelModel(object):
tensorflow.nn.softmax
14,481
import tensorflow as tf with tf.control_dependencies([p]): return tf.reduce_mean(loss)
tensorflow.reduce_mean
14,482
import tensorflow as tf gtboxes_and_label_r[:int(num_objects), :].astype(np.float32) def main(self): with tf.Graph().as_default() as graph, tf.device('/cpu:0'): num_gpu = len(cfgs.GPU_GROUP.strip().split(','))
tensorflow.Graph
14,483
import tensorflow as tf z, = tf.py_func(my_func, [x, y], [tf.complex64]) self.assertAllClose(z.eval(), my_func(1+2j, 3+4j)) # a bit excotic function (rfft) with self.test_session(): x = tf.constant([1., 2., 3., 4.], tf.float32) def rfft(x): return np.fft.rfft(x).astype(np.complex64) y, = tf.py_func(rfft, [x], [tf.complex64]) self.assertAllClose(y.eval(), np.fft.rfft([1., 2., 3., 4.]))
tensorflow.constant
14,484
import tensorflow as tf label_list = processor.get_labels() tokenizer = tokenization.FullTokenizer( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver('grpc://' + os.environ['COLAB_TPU_ADDR']) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) train_examples = None num_train_steps = None num_warmup_steps = None if FLAGS.do_train: train_examples = processor.get_train_examples(FLAGS.data_dir) num_train_steps = int( len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
tensorflow.contrib.tpu.TPUConfig
14,485
import tensorflow as tf pred_larg = tf.where(geq, pred1, pred2) pred_small = tf.where(geq, pred2, pred1) loss = tf.maximum(0., (tgt_larg - tgt_small) - (pred_larg - pred_small)) if hard_ratio < 1.0: hard_num = tf.cast(tools.shape(pred1)[0] * hard_ratio, tf.int32) loss = tf.reshape(loss, [-1]) hard_loss, _ = tf.math.top_k(loss, k=hard_num) return hard_loss return loss def compute_error_loss(pred1, pred2, tgt1, tgt2, hard_ratio=1.0): geq = tf.cast((tgt1 - tgt2) > 0, tf.bool) tgt_larg = tf.where(geq, tgt1, tgt2) tgt_small = tf.where(geq, tgt2, tgt1) pred_larg = tf.where(geq, pred1, pred2) pred_small = tf.where(geq, pred2, pred1) loss = tf.maximum(0., (tgt_larg - tgt_small) - (pred_larg - pred_small)) if hard_ratio < 1.0: hard_num = tf.cast(tools.shape(pred1)[0] * hard_ratio, tf.int32) loss = tf.reshape(loss, [-1]) hard_loss, _ = tf.math.top_k(loss, k=hard_num) return hard_loss return loss
tensorflow.where
14,486
import tensorflow as tf with tf.Session() as sess: ckpt = tf.train.get_checkpoint_state('./model_pretrain') if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path): print("loading checkpoint...") saver.restore(sess, ckpt.model_checkpoint_path) else: sess.run(tf.global_variables_initializer()) summary_writer = tf.summary.FileWriter('./logs_pretrain', sess.graph) _x = x[:, :, :, ::-1] tf.summary.image('x', _x, 4) summary_op = tf.summary.merge_all() epoch_learning_rate = init_learning_rate
tensorflow.summary.FileWriter
14,487
import tensorflow as tf norm_grads = None if self.max_grad_norm is not None: grads, norm_grads = tf.clip_by_global_norm(grads, self.max_grad_norm) grads = list(zip(grads, self.params)) with tf.variable_scope("input_info", reuse=False): tf.summary.scalar('rewards', tf.reduce_mean(self.reward_ph)) tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate)) tf.summary.scalar('advantage', tf.reduce_mean(adv)) tf.summary.scalar('action_probability', tf.reduce_mean(self.mu_ph)) if self.full_tensorboard_log:
tensorflow.reduce_mean
14,488
import tensorflow as tf import tensorflow as tf from .tensorflowcv.model_provider import get_model def save_model_params(sess, file_path): # assert file_path.endswith('.npz') param_dict = {v.name: v.eval(sess) for v in tf.global_variables()} np.savez_compressed(file_path, **param_dict) def load_model_params(net, param_dict, sess, ignore_missing=False):
tensorflow.global_variables
14,489
import tensorflow as tf """ micros = int(time.time()*10**6) scope_name = str(micros) op_list = [] with tf.name_scope(scope_name): yield op_list g = tf.get_default_graph() op_list.extend(ge.select_ops(scope_name+"/.*", graph=g))
tensorflow.name_scope
14,490
import tensorflow as tf def _conv(name, x, num_filters=16, kernel_size=(3, 3), padding='SAME', stride=(1, 1), initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, dilation=1.0, bias=-1): with tf.variable_scope(name): stride = [1, stride[0], stride[1], 1] kernel_shape = [kernel_size[0], kernel_size[1], x.shape[-1], num_filters] w = variable_with_weight_decay(kernel_shape, initializer, l2_strength) variable_summaries(w) if dilation > 1: conv = tf.nn.atrous_conv2d(x, w, dilation, padding) else: if type(padding)==type(''): conv = tf.nn.conv2d(x, w, stride, padding) else: conv = tf.pad(x, padding, "CONSTANT") conv = tf.nn.conv2d(conv, w, stride, padding='VALID') if bias != -1: bias = tf.get_variable('biases', [num_filters], initializer=tf.constant_initializer(bias)) variable_summaries(bias) conv = tf.nn.bias_add(conv, bias) tf.add_to_collection('debug_layers', conv) return conv
tensorflow.nn.conv2d
14,491
from tensorflow.python.ops import math_ops factor = array_ops.where(norm < max_norm, array_ops.ones_like(norm), math_ops.exp(log_mean) / norm)
tensorflow.python.ops.math_ops.exp
14,492
from tensorflow.python.ops import math_ops trainable=False) return moving_averages.assign_moving_average( moving_average_variable, value, decay, zero_debias=False) # quicker adaptation at the beginning if global_step is not None: n = math_ops.cast(global_step, dtypes.float32) decay = math_ops.minimum(decay, n / (n + 1.)) # update averages mean = moving_average("mean", log_norm, decay) sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)
tensorflow.python.ops.math_ops.cast
14,493
import tensorflow as tf zeros = array_ops.zeros_like(logits, dtype=logits.dtype) pos_p_sub = array_ops.where(labels > zeros, labels - logits, zeros) neg_p_sub = array_ops.where(labels > zeros, zeros, logits) cross_ent = - alpha * (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(logits, 1e-8, 1.0)) \ - (1 - alpha) * (neg_p_sub ** gamma) * tf.log(tf.clip_by_value(1.0 - logits, 1e-8, 1.0)) return tf.reduce_sum(cross_ent, 1) start_label = tf.one_hot(self.start_label, tf.shape(self.logits1)[1], axis=1) end_label = tf.one_hot(self.end_label, tf.shape(self.logits2)[1], axis=1) if self.config.loss_type == 'cross_entropy': start_loss = tf.nn.softmax_cross_entropy_with_logits( logits=self.logits1, labels=start_label) end_loss = tf.nn.softmax_cross_entropy_with_logits( logits=self.logits2, labels=end_label) self.loss = tf.reduce_mean(start_loss + end_loss) else: start_loss = focal_loss(tf.nn.softmax(self.logits1, -1), start_label) end_loss = focal_loss(tf.nn.softmax(self.logits2, -1), end_label) self.loss = tf.reduce_mean(start_loss + end_loss) self.logger.info("loss type %s" % self.config.loss_type) self.all_params = tf.trainable_variables() if self.config.l2_norm is not None: self.logger.info("applying l2 loss") variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) l2_loss = tf.contrib.layers.apply_regularization(regularizer, variables) self.loss += l2_loss
tensorflow.reduce_mean
14,494
import tensorflow as tf # Make a matrix where each row contains [0, 1, ..., max_sequence_len] r = tf.range(0, max_sequence_len, 1) range_row = tf.expand_dims(r, 0) range_tiled = tf.tile(range_row, [batch_size, 1]) self.lengths_transposed = lengths_transposed self.lengths_tiled = lengths_tiled self.range_row = range_row self.range_tiled = range_tiled # Use the logical operations to create a mask indicator = tf.less(range_tiled, lengths_tiled+1) #i.e. where seq len is less than index trim = np.ones(indicator.get_shape()) trim[:,0] = 0 #ignore start symbol indicator = tf.logical_and(indicator, trim.astype(bool)) self.indicator = indicator sz = [batch_size, max_sequence_len] self._mask = tf.select(indicator, tf.ones(sz), tf.zeros(sz)) #-------------------------------# self.weights = tf.constant(weights, dtype=tf.float32, name='class_weights')
tensorflow.less
14,495
import tensorflow as tf if FLAGS.forward_only and FLAGS.eval: raise ValueError('Only one of forward_only and eval flags is true') if FLAGS.eval: return 'evaluation' if FLAGS.forward_only: return 'forward-only' return 'training' def benchmark_one_step(sess, fetches, step, batch_size, step_train_times, trace_filename, summary_op=None): """Advance one step of benchmarking.""" if trace_filename is not None and step == -1: run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) run_metadata = tf.RunMetadata() else: run_options = None run_metadata = None summary_str = None start_time = time.time() if summary_op is None: results = sess.run(fetches, options=run_options, run_metadata=run_metadata) else: (results, summary_str) = sess.run( [fetches, summary_op], options=run_options, run_metadata=run_metadata) if not FLAGS.forward_only: lossval = results[1] else:
tensorflow.RunMetadata
14,496
from tensorflow.python.ops import variable_scope as vs bias_initializer=bias_ones, kernel_initializer=self._kernel_initializer) value = math_ops.sigmoid(self._gate_linear([inputs, state])) r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1) r_state = r * state if self._candidate_linear is None: with vs.variable_scope("candidate"): self._candidate_linear = _Linear( [inputs, r_state], self._num_units, True, bias_initializer=self._bias_initializer, kernel_initializer=self._kernel_initializer) c = self._activation(self._candidate_linear([inputs, r_state]))
tensorflow.python.ops.variable_scope.variable_scope
14,497
import tensorflow as tf tf.GraphKeys.VARIABLES], initializer=tf.ones_initializer(),
tensorflow.ones_initializer
14,498
import tensorflow as tf output_vars1 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="task_dependent") output_vars2 = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="task_independent/Variable_1") var_list = [output_vars1, output_vars2] train_op = tf.train.AdamOptimizer( # learning_rate=self.params["learning_rate"] # **self.params.get("optimizer_params", {}) # learning_rate=learning_rate **optimizer_params ).minimize(loss, global_step=global_step, var_list=var_list) return tf.estimator.EstimatorSpec( self.mode, loss=loss, train_op=train_op )
tensorflow.estimator.EstimatorSpec
14,499