Dataset columns: seed (string, length 25–2.89k), seed_api (string, length 14–102), index (int64, 0–14.8k). Each record below lists a seed code snippet, the fully qualified seed_api it exercises, and its index.
import tensorflow as tf

def fc_layer(layer_name, x, out_nodes):
    '''
    Wrapper for fully connected layers with RELU activation as default
    '''
    shape = x.get_shape()
    if len(shape) == 5:  # FC 3D
        size = shape[1].value * shape[2].value * shape[3].value * shape[4].value
    elif len(shape) == 4:
        size = shape[1].value * shape[2].value * shape[3].value
    else:
        size = shape[-1].value
    with tf.variable_scope(layer_name):
        w = tf.get_variable(name='weight',
                            shape=[size, out_nodes],
                            initializer=tf.constant_initializer(0.0))
        b = tf.get_variable(name='bias',
                            shape=[out_nodes],
                            initializer=tf.constant_initializer(0.0))
        # batch?
        flat_x = tf.reshape(x, [-1, size])
        x = tf.nn.bias_add(tf.matmul(flat_x, w), b)
        x = tf.nn.relu(x)
tensorflow.variable_scope
0
import tensorflow as tf

ch_emb = tf.reshape(tf.nn.embedding_lookup(
    self.char_mat, self.ch), [N * PL, CL, dc])
qh_emb = tf.reshape(tf.nn.embedding_lookup(
    self.char_mat, self.qh), [N * QL, CL, dc])
ch_emb = tf.nn.dropout(ch_emb, 1.0 - 0.5 * self.dropout)
qh_emb = tf.nn.dropout(qh_emb, 1.0 - 0.5 * self.dropout)

# Bidaf style conv-highway encoder
ch_emb = conv(ch_emb, d, bias=True, activation=tf.nn.relu,
              kernel_size=5, name="char_conv", reuse=None)
qh_emb = conv(qh_emb, d, bias=True, activation=tf.nn.relu,
              kernel_size=5, name="char_conv", reuse=True)

ch_emb = tf.reduce_max(ch_emb, axis=1)
qh_emb = tf.reduce_max(qh_emb, axis=1)

ch_emb = tf.reshape(ch_emb, [N, PL, ch_emb.shape[-1]])
qh_emb = tf.reshape(qh_emb, [N, QL, ch_emb.shape[-1]])

c_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.c), 1.0 - self.dropout)
q_emb = tf.nn.dropout(tf.nn.embedding_lookup(self.word_mat, self.q), 1.0 - self.dropout)

c_emb = tf.concat([c_emb, ch_emb], axis=2)
q_emb = tf.concat([q_emb, qh_emb], axis=2)

c_emb = highway(c_emb, size=d, scope="highway", dropout=self.dropout, reuse=None)
tensorflow.reduce_max
1
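The row above pools character embeddings with tf.reduce_max over axis 1. For reference, a minimal sketch of that reduction (not from the dataset; assumes TF 2.x eager mode):

import tensorflow as tf

x = tf.constant([[1., 5., 3.],
                 [4., 2., 6.]])   # shape [2, 3]
tf.reduce_max(x, axis=1)          # -> [5., 6.], the per-row maximum
tf.reduce_max(x)                  # -> 6., maximum over all elements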
import tensorflow as tf

def testGPU(self):
    if not tf.test.is_built_with_cuda():
        return
    save_path = os.path.join(self.get_temp_dir(), "gpu")
    with tf.Session("", graph=tf.Graph()) as sess:
        with sess.graph.device("/gpu:0"):
            v0_1 = tf.Variable(123.45)
        save = tf.train.Saver({"v0": v0_1})
        tf.initialize_all_variables().run()
        save.save(sess, save_path)

    with tf.Session("", graph=tf.Graph()) as sess:
        with sess.graph.device("/gpu:0"):
            v0_2 = tf.Variable(543.21)
        save = tf.train.Saver({"v0": v0_2})
        tf.initialize_all_variables().run()
        self.assertAllClose(543.21, v0_2.eval())
        save.restore(sess, save_path)
        self.assertAllClose(123.45, v0_2.eval())

def testVariables(self):
    save_path = os.path.join(self.get_temp_dir(), "variables")
    with tf.Session("", graph=tf.Graph()) as sess:
        one = tf.Variable(1.0)
        twos = tf.Variable([2.0, 2.0, 2.0])
        init = tf.initialize_all_variables()
        save = tf.train.Saver(tf.all_variables())
        init.run()
        save.save(sess, save_path)
tensorflow.initialize_all_variables
2
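tf.initialize_all_variables, used in the test above, was deprecated in TF 0.12 in favor of tf.global_variables_initializer. A minimal 1.x-style sketch of the replacement (not from the dataset):

import tensorflow as tf  # TF 1.x graph mode assumed

v = tf.Variable(41.0)
init = tf.global_variables_initializer()  # successor to tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)            # variables hold their initial values only after this runs
    print(sess.run(v + 1.0))  # 42.0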
import tensorflow as tf

        'learning_rate', tf.reduce_mean(learning_rate), step=global_step)

    return tf.contrib.summary.all_summary_ops()

# To log the loss, current learning rate, and epoch for Tensorboard, the
# summary op needs to be run on the host CPU via host_call. host_call
# expects [batch_size, ...] Tensors, thus reshape to introduce a batch
# dimension. These Tensors are implicitly concatenated to
# [params['batch_size']].
global_step_t = tf.reshape(global_step, [1])
total_loss_t = tf.reshape(total_loss, [1])
total_rpn_loss_t = tf.reshape(total_rpn_loss, [1])
rpn_score_loss_t = tf.reshape(rpn_score_loss, [1])
rpn_box_loss_t = tf.reshape(rpn_box_loss, [1])
total_fast_rcnn_loss_t = tf.reshape(total_fast_rcnn_loss, [1])
fast_rcnn_class_loss_t = tf.reshape(fast_rcnn_class_loss, [1])
fast_rcnn_box_loss_t = tf.reshape(fast_rcnn_box_loss, [1])
mask_loss_t = tf.reshape(mask_loss, [1])
learning_rate_t = tf.reshape(learning_rate, [1])

host_call = (host_call_fn,
             [global_step_t, total_loss_t, total_rpn_loss_t, rpn_score_loss_t,
              rpn_box_loss_t, total_fast_rcnn_loss_t, fast_rcnn_class_loss_t,
              fast_rcnn_box_loss_t,
tensorflow.reshape
3
import tensorflow as tf

b1 = tf.matmul(state, hyper_b_1)

w1_reshaped = tf.reshape(w1, [-1, n_agents, n_h_mixer])  # reshape into batch of matrices
b1_reshaped = tf.reshape(b1, [-1, 1, n_h_mixer])  # [batch, 1, n_h_mixer]

hidden = tf.nn.elu(tf.matmul(agent_qs_reshaped, w1_reshaped) + b1_reshaped)

# Second layer
w_final = tf.abs(tf.matmul(state, hyper_w_final))
tensorflow.matmul
4
from tensorflow.contrib.boosted_trees.proto import learner_pb2

        num_trees=1,
        examples_per_layer=3,
        model_dir=model_dir,
        config=config,
        feature_columns=[core_feature_column.numeric_column("x")],
        use_core_libs=True)

    model.fit(input_fn=_train_input_fn, steps=15)
    model.evaluate(input_fn=_eval_input_fn, steps=1)
    model.export(self._export_dir_base)

def testFitAndEvaluateDontThrowExceptionWithCoreForClassifier(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 1
    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    classifier = estimator.GradientBoostedDecisionTreeClassifier(
        learner_config=learner_config,
        num_trees=1,
        examples_per_layer=3,
        model_dir=model_dir,
        config=config,
tensorflow.contrib.boosted_trees.proto.learner_pb2.LearnerConfig
5
import tensorflow as tf "below 1.1.0") soft_placement = False if FLAGS.num_gpus > 1: soft_placement = True util.auto_parallel(metagraph, m) with tf.Graph().as_default(): tf.train.import_meta_graph(metagraph) for model in models.values(): model.import_ops() sv = tf.train.Supervisor(logdir=FLAGS.save_path) config_proto = tf.ConfigProto(allow_soft_placement=soft_placement)
tensorflow.Graph
6
import tensorflow as tf

        transpose=transpose)

    if w_project is not None:
        # note: this mined snippet calls tf.conv2d; the public op lives at tf.nn.conv2d
        x = tf.conv2d(x, w_project, strides, padding='SAME')

    # Set shape for BN in the residual function.
tensorflow.conv2d
7
import tensorflow as tf

if FLAGS.use_tpu:
    estimator = tf.contrib.tpu.TPUEstimator(
tensorflow.contrib.tpu.TPUEstimator
8
from tensorflow.python.ops.rnn_cell_impl import _Linear

            [inputs, state],
            2 * self._num_units,
            True,
            bias_initializer=bias_ones,
            kernel_initializer=self._kernel_initializer)

    value = math_ops.sigmoid(self._gate_linear([inputs, state]))
    r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)

    r_state = r * state
    if self._candidate_linear is None:
        with vs.variable_scope("candidate"):
            self._candidate_linear = _Linear(
                [inputs, r_state],
                self._num_units,
                True,
                bias_initializer=self._bias_initializer,
                kernel_initializer=self._kernel_initializer)
    c = self._activation(self._candidate_linear([inputs, r_state]))
    u = (1.0 - att_score) * u
    new_h = u * state + (1 - u) * c
    return new_h, new_h

def prelu(_x, scope=''):
tensorflow.python.ops.rnn_cell_impl._Linear
9
from tensorflow.contrib.layers.python.layers import utils

# Only make the ops if we know that `is_training=True`, or the value of
# `is_training` is unknown.
is_training_const = utils.constant_value(is_training)
if is_training_const is None or is_training_const:
    update_mean_op, update_second_moment_op = utils.smart_cond(
        is_training,
        build_update_ops,
        build_no_ops,
tensorflow.contrib.layers.python.layers.utils.smart_cond
10
import tensorflow as tf

self._train_op = optimizer.apply_gradients(
    zip(grads, tvars),
    global_step=tf.train.get_or_create_global_step())

self._new_lr = tf.placeholder(
    tf.float32, shape=[], name="new_learning_rate")
self._lr_update = tf.assign(self._lr, self._new_lr)
tensorflow.placeholder
11
import tensorflow as tf

    with self.test_session() as session:
        @dynamic_batching.batch_fn
        def f(a, b):
            return a + b

        output0 = f(tf.constant([1]), tf.constant([2]))
        output1 = f(tf.constant([[2]]), tf.constant([3]))

        tp = pool.ThreadPool(2)
        f0 = tp.apply_async(session.run, [output0])
        f1 = tp.apply_async(session.run, [output1])

        time.sleep(_SLEEP_TIME)
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord)

        with self.assertRaises(tf.errors.CancelledError):
            f0.get()
            f1.get()

        # regex matches the library's error string verbatim
        with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                     'Shapes of inputs much be equal'):
            coord.join()

def test_output_must_have_batch_dimension(self):
    with self.test_session() as session:
        @dynamic_batching.batch_fn
        def f(_):
tensorflow.train.Coordinator
12
import tensorflow as tf

    if tf.version.VERSION[0] == "2":
        gpus = tf.config.experimental.list_physical_devices('GPU')
        tf.config.experimental.set_memory_growth(gpus[0], True)
        tf.config.experimental.set_virtual_device_configuration(
            gpus[0],
            [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=memory_limit)])
    else:
        gpu_options = tf.GPUOptions(allow_growth=allow_growth,
                                    per_process_gpu_memory_fraction=fraction)
        config = tf.ConfigProto(gpu_options=gpu_options)
        session = tf.Session(config=config)
        K.set_session(session)

def multi_gpu(model, gpus=None, cpu_merge=True, cpu_relocation=False):
    '''Takes as input the model, and returns a model
tensorflow.ConfigProto
13
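For context, a minimal TF 1.x sketch (not from the dataset) of the ConfigProto / GPUOptions pattern the else-branch above relies on:

import tensorflow as tf  # TF 1.x assumed

gpu_options = tf.GPUOptions(allow_growth=True,                    # claim GPU memory on demand
                            per_process_gpu_memory_fraction=0.5)  # cap at half the device memory
config = tf.ConfigProto(gpu_options=gpu_options)
sess = tf.Session(config=config)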
import tensorflow as tf

stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
reset_ph = tf.placeholder(tf.bool, (), name="reset")

eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False)
param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False)

# Unmodified Q.
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")

# Perturbable Q used for the actual rollout.
q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func")

# We have to wrap this code into a function due to the way tf.cond() works. See
tensorflow.constant_initializer
14
import tensorflow as tf

    with tf.variable_scope(name):
        filt = self.get_conv_filter(name)
        conv = tf.nn.conv2d(bottom, filt, [1, 1, 1, 1], padding='SAME')

        conv_biases = self.get_bias(name)
        bias = tf.nn.bias_add(conv, conv_biases)

        relu = tf.nn.relu(bias)
        return relu

def fc_layer(self, bottom, name):
tensorflow.nn.bias_add
15
import tensorflow as tf

gru_fw = tf.contrib.cudnn_rnn.CudnnGRU(1, num_units)
gru_bw = tf.contrib.cudnn_rnn.CudnnGRU(1, num_units)
init_fw = tf.tile(tf.Variable(
    tf.zeros([1, 1, num_units])), [1, batch_size, 1])
init_bw = tf.tile(tf.Variable(
    tf.zeros([1, 1, num_units])), [1, batch_size, 1])
mask_fw = dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32),
                  keep_prob=keep_prob, is_train=is_train, mode=None)
mask_bw = dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32),
                  keep_prob=keep_prob, is_train=is_train, mode=None)
self.grus.append((gru_fw, gru_bw, ))
self.inits.append((init_fw, init_bw, ))
tensorflow.ones
16
import tensorflow as tf

        x = tf.nn.conv2d(input, filters, strides=[1, stride, stride, 1],
                         padding=padding, name='zero-conv_' + id,
                         dilations=(1, dilation, dilation, 1))
        if use_bias:
            bias = tf.get_variable("bias", [channels],
                                   initializer=tf.constant_initializer(0.0))
            x = tf.nn.bias_add(x, bias)
        return x

def t_conv(self, id, input, channels, size=3, stride=1, use_bias=True,
           padding="SAME", init_stddev=-1.0):
    # good old t-conv. I love it!
tensorflow.nn.bias_add
17
import tensorflow as tf

    raise ValueError('Variables to load is empty.')
return tf.train.Scaffold()
tensorflow.train.Scaffold
18
import tensorflow as tf

def find_hard_distances(distance_matrix, indicator_matrix):
    distance_matrix = tf.where(
        tf.stop_gradient(indicator_matrix), distance_matrix,
        tf.fill(tf.shape(distance_matrix), distance_matrix.dtype.max))
    hard_distances = tf.math.reduce_min(distance_matrix, axis=-1)
    return hard_distances

hard_negative_mining_distances = find_hard_distances(
    anchor_match_mining_distance_matrix, indicators)

indicators &= tf.math.equal(
    anchor_match_mining_distance_matrix,
    tf.expand_dims(hard_negative_mining_distances, axis=-1))

hard_negative_distances = find_hard_distances(anchor_match_distance_matrix,
                                              indicators)

return hard_negative_distances, hard_negative_mining_distances

def compute_hard_negative_triplet_loss(
        anchor_positive_distances,
        anchor_match_distance_matrix,
        anchor_match_negative_indicator_matrix,
tensorflow.expand_dims
19
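tf.expand_dims, used above to broadcast the per-anchor minima back against the full distance matrix, inserts a length-1 axis. A minimal sketch (not from the dataset):

import tensorflow as tf

x = tf.constant([1., 2., 3.])  # shape [3]
tf.expand_dims(x, axis=0)      # shape [1, 3]
tf.expand_dims(x, axis=-1)     # shape [3, 1] -- lines up for row-wise broadcasting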
import tensorflow as tf

        lambda: param_noise_scale.assign(param_noise_scale * 1.01),
        lambda: param_noise_scale.assign(param_noise_scale / 1.01),
    )
    return update_scale_expr

# Functionality to update the threshold for parameter space noise.
update_param_noise_threshold_expr = param_noise_threshold.assign(
    tf.cond(update_param_noise_threshold_ph >= 0,
            lambda: update_param_noise_threshold_ph,
            lambda: param_noise_threshold))

# Put everything together.
deterministic_actions = tf.argmax(q_values_perturbed, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
tensorflow.cond
20
import tensorflow as tf

y += dense(hidden, encoder.attn_size, use_bias=False, name='U_a')

if encoder.position_bias and input_length is not None and time is not None:
    src_pos = tf.tile(tf.expand_dims(tf.range(time_steps), axis=0), [batch_size, 1])
    trg_pos = tf.tile(tf.reshape(time, [1, 1]), [batch_size, time_steps])
    src_len = tf.tile(tf.expand_dims(input_length, axis=1), [1, time_steps])  # - 1
    pos_feats = tf.to_float(tf.stack([src_pos, trg_pos, src_len], axis=2))
    pos_feats = tf.log(1 + pos_feats)

    y += dense(pos_feats, encoder.attn_size, use_bias=False, name='P_a')
tensorflow.expand_dims
21
import tensorflow as tf

filename = _DATA_URL.split('/')[-1]
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
tensorflow.gfile.Remove
22
import tensorflow as tf

elif self.hidden_init == 'zeros':
    l1_h2 = tf.zeros(x_shape, dtype=self.dtype)
    l2_h2 = tf.zeros(l2_shape, dtype=self.dtype)
    l3_h2 = tf.zeros(l3_shape, dtype=self.dtype)
else:
    raise RuntimeError
tensorflow.zeros
23
import tensorflow as tf

def main(argv=None):
    start1 = time.time()
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list
    if not tf.gfile.Exists(FLAGS.checkpoint_path):
        tf.gfile.MkDir(FLAGS.checkpoint_path)
    else:
        if not FLAGS.restore:
            tf.gfile.DeleteRecursively(FLAGS.checkpoint_path)
            tf.gfile.MkDir(FLAGS.checkpoint_path)

    input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images')
    input_score_maps = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='input_score_maps')
    if FLAGS.geometry == 'RBOX':
        input_geo_maps = tf.placeholder(tf.float32, shape=[None, None, None, 5], name='input_geo_maps')
    else:
        input_geo_maps = tf.placeholder(tf.float32, shape=[None, None, None, 8], name='input_geo_maps')
tensorflow.gfile.DeleteRecursively
24
import tensorflow as tf """ mean, var = tf.nn.moments( x, reduction_axes, shift=None, name=None, keep_dims=False) if sorted(reduction_axes) == range(ndim(x))[:-1]: normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon) else: # need broadcasting target_shape = [] for axis in range(get_ndim(x)): if axis in reduction_axes: target_shape.append(1) else: target_shape.append(tf.shape(x)[axis]) target_shape = stack(target_shape) broadcast_mean = tf.reshape(mean, target_shape) broadcast_var = tf.reshape(var, target_shape) broadcast_gamma = tf.reshape(gamma, target_shape) broadcast_beta = tf.reshape(beta, target_shape) normed = tf.nn.batch_normalization(x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma, epsilon) return normed, mean, var def ones(shape, dtype=None, name=None): """Instantiates an all-ones tensor variable and returns it. Parameters ---------- shape: Tuple of integers, shape of returned Keras variable.
tensorflow.reshape
25
import tensorflow as tf

    Arguments:

    - *indicator*: a 1-dimensional boolean tensor indicating which elements
      are allowed to be sampled and which are not.
    - *num_samples*: int32 scalar tensor

    Returns:

    A boolean tensor with the same shape as input (indicator) tensor
    """
    indices = tf.where(indicator)
    indices = tf.random.shuffle(indices)
    indices = tf.reshape(indices, [-1])

    num_samples = tf.minimum(tf.size(indices), num_samples)
    selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))

    selected_indicator = ops.indices_to_dense_vector(selected_indices,
                                                     tf.shape(indicator)[0])

    return tf.equal(selected_indicator, 1)

def sample_balanced_positive_negative(indicator, sample_size, labels,
                                      positive_fraction=0.5):
    """Subsamples minibatches to a desired balance of positives and negatives.

    Arguments:

    - *indicator*: boolean tensor of shape [N] whose True entries can be sampled.
    - *sample_size*: desired batch size. If None, keeps all positive samples and
tensorflow.size
26
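tf.size, used above to cap num_samples at the number of candidate indices, returns the total element count. A minimal sketch (not from the dataset):

import tensorflow as tf

t = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
tf.size(t)  # -> 6 (total elements; contrast tf.shape(t) -> [2, 3])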
import tensorflow as tf

                  pos_weight=pos_weight,
                  norm=norm)

# Normalization and preprocessing on adjacency matrix
adj_norm = preprocess_graph(adj)
adj_label = sparse_to_tuple(adj + sp.eye(adj.shape[0]))

# Initialize TF session
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Model training
print(f"Training {model_name}...")

t = time.time()
print_every = 50
tensorflow.Session
27
import tensorflow as tf

e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/eval_net')

e_params += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/mixing_net' + '/eval_hyper')
t_params += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/mixing_net' + '/target_hyper')

with tf.variable_scope('soft_replacement'):
    self.target_replace_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]

self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())

current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/' + current_time
self.summary_writer = tf.summary.FileWriter(train_log_dir, self.sess.graph)

def _build_net(self):
    # we use parameter sharing among agents
    with tf.variable_scope(self.name):
        # ------------------ all inputs ------------------------
        self.S = tf.placeholder(tf.float32, [None, self.num_global_s], name='S')  # input Global State
        self.s = tf.placeholder(tf.float32, [None, self.num_s], name='s1')  # input state for agent1
        self.S_ = tf.placeholder(tf.float32, [None, self.num_global_s], name='S_')  # input Next Global State
        self.s_ = tf.placeholder(tf.float32, [None, self.num_s], name='s1_')  # input next state for agent1
        self.R = tf.placeholder(tf.float32, [None, ], name='R')  # input Reward
        self.a = tf.placeholder(tf.float32, [None, self.num_a], name='a')  # input Action onehot for agent1
        self.done = tf.placeholder(tf.float32, [None, ], name='done')  # input Done info ???
tensorflow.summary.FileWriter
28
import tensorflow as tf

    self.num_replicas = num_replicas
    self.name = name

    self._create_params()
    arc_seq_1, entropy_1, log_prob_1, c, h = self._build_sampler(use_bias=True)
    arc_seq_2, entropy_2, log_prob_2, _, _ = self._build_sampler(prev_c=c, prev_h=h)
    self.sample_arc = (arc_seq_1, arc_seq_2)
    self.sample_entropy = entropy_1 + entropy_2
    self.sample_log_prob = log_prob_1 + log_prob_2

def _create_params(self):
    initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)
    with tf.variable_scope(self.name, initializer=initializer):
        with tf.variable_scope("lstm"):
            self.w_lstm = []
            for layer_id in range(self.lstm_num_layers):
                with tf.variable_scope("layer_{}".format(layer_id)):
                    w = tf.get_variable("w", [2 * self.lstm_size, 4 * self.lstm_size])
                    self.w_lstm.append(w)

        self.g_emb = tf.get_variable("g_emb", [1, self.lstm_size])
        with tf.variable_scope("emb"):
            self.w_emb = tf.get_variable("w", [self.num_branches, self.lstm_size])
        with tf.variable_scope("softmax"):
            self.w_soft = tf.get_variable("w", [self.lstm_size, self.num_branches])
            b_init = np.array([10.0, 10.0] + [0] * (self.num_branches - 2),
tensorflow.variable_scope
29
import tensorflow as tf

if self.multiplicative_excitation:
    if self.lesion_kappa:
        setattr(
            self,
            'kappa_%s' % layer,
            tf.constant(0.))
    else:
        setattr(
            self,
            'kappa_%s' % layer,
tensorflow.constant
30
import tensorflow as tf

def conv3d(layer_name, x, out_channels, kernel_size=[1, 3, 3],
           strides=[1, 1, 1, 1, 1], data_format='NDHWC', is_pretrain=True):
    '''
    Convolution 3D op wrapper, use RELU activation after convolution
    '''
    in_channels = x.get_shape()[-1].value
    with tf.variable_scope(layer_name):
        w = tf.get_variable(name='weight',
                            trainable=is_pretrain,
                            shape=[kernel_size[0], kernel_size[1], kernel_size[2], in_channels, out_channels],
                            initializer=tf.contrib.layers.xavier_initializer())
        b = tf.get_variable(name='bias',
                            trainable=is_pretrain,
                            shape=[out_channels],
                            initializer=tf.contrib.layers.xavier_initializer())
        x = tf.nn.conv3d(x, w, strides=strides, padding='SAME',
                         data_format=data_format, name='conv3d')
        x = tf.nn.bias_add(x, b, name='bias_add')
        x = tf.nn.relu(x, name='relu')
        return x

def conv(layer_name, x, out_channels, kernel_size=[3, 3], strides=[1, 1, 1, 1], is_pretrain=True):
    '''
    Convolution op wrapper, use RELU activation after convolution
    Args:
        layer_name:
        x: input tensor
tensorflow.contrib.layers.xavier_initializer
31
import tensorflow as tf

            filter_size[1] - input_.get_shape().as_list()[2], 0
        ], [-1, -1, -1, -1])
    if bias:
        biases = variable_on_cpu("biases", [dim_out], tf.constant_initializer(0.))
        res = tf.nn.bias_add(res, biases)
    if nonlinearity is not None:
        res = nonlinearity(res)
    return res

def max_pool_2x2(input_):
    """Max pooling."""
    return tf.nn.max_pool(
        input_, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

def depool_2x2(input_, stride=2):
    """Depooling."""
    shape = input_.get_shape().as_list()
    batch_size = shape[0]
    height = shape[1]
    width = shape[2]
    channels = shape[3]
    res = tf.reshape(input_, [batch_size, height, 1, width, 1, channels])
    res = tf.concat(
        axis=2,
        values=[res, tf.zeros([batch_size, height, stride - 1, width, 1, channels])])
tensorflow.nn.max_pool
32
import tensorflow as tf

# Make a matrix where each row contains [0, 1, ..., max_sequence_len]
r = tf.range(0, max_sequence_len, 1)
range_row = tf.expand_dims(r, 0)
range_tiled = tf.tile(range_row, [batch_size, 1])

# Use the logical operations to create a mask
tensorflow.tile
33
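The range/tile pattern above is the standard way to build a sequence-length mask; a self-contained sketch (not from the dataset; the lengths values are hypothetical):

import tensorflow as tf

max_sequence_len = 5
lengths = tf.constant([2, 4])                        # hypothetical per-example lengths
r = tf.range(0, max_sequence_len, 1)                 # [0, 1, 2, 3, 4]
range_tiled = tf.tile(tf.expand_dims(r, 0), [2, 1])  # one row of 0..4 per example
mask = range_tiled < tf.expand_dims(lengths, 1)      # [[T,T,F,F,F], [T,T,T,T,F]]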
import tensorflow as tf

        # I.e., 0.1 dropout
        output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)

    return (loss, per_example_loss, logits, probabilities)

def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
    """Returns `model_fn` closure for TPUEstimator."""

    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
tensorflow.reduce_mean
34
import tensorflow as tf

    vocab_path = self.vocabulary_file_by_name(vocab_filename)
    if not vocab_path:
        raise ValueError(
            'Could not compute vocabulary size for {}, does not exist'.format(
                vocab_filename))
    elif vocab_path.endswith('tfrecord.gz'):
        dataset = tf.data.TFRecordDataset(vocab_path, compression_type='GZIP')

        def reduce_fn(accum, elem):
            return tf.size(elem, out_type=tf.int64, name='vocabulary_size') + accum

        return _get_tensor_value(
            dataset.batch(tf.int32.max).reduce(
                tf.constant(0, tf.int64), reduce_fn))
    else:
        with tf.io.gfile.GFile(vocab_path, 'rb') as f:
            return sum(1 for _ in f)

def vocabulary_by_name(self, vocab_filename: str) -> List[bytes]:
tensorflow.size
35
import tensorflow as tf

def get_assignment_map_from_checkpoint(tvars, init_checkpoint, num_of_group=0):
    """Compute the union of the current variables and checkpoint variables."""
    assignment_map = {}
    initialized_variable_names = {}

    name_to_variable = collections.OrderedDict()
    for var in tvars:
        name = var.name
        m = re.match("^(.*):\\d+$", name)
        if m is not None:
            name = m.group(1)
        name_to_variable[name] = var

    init_vars = tf.train.list_variables(init_checkpoint)
    init_vars_name = [name for (name, _) in init_vars]

    if num_of_group > 0:
        assignment_map = []
        for gid in range(num_of_group):
            assignment_map.append(collections.OrderedDict())
    else:
        assignment_map = collections.OrderedDict()

    for name in name_to_variable:
        if name in init_vars_name:
            tvar_name = name
        elif (re.sub(r"/group_\d+/", "/group_0/",
tensorflow.train.list_variables
36
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, shape[0]])
w = tf.Variable(tf.zeros(shape))
b = tf.Variable(tf.zeros(shape[1]))
self.x = x
self.w = w
self.b = b

y = tf.nn.softmax(tf.matmul(x, w) + b)
y_ = tf.placeholder(tf.float32, [None, shape[1]])
self.y_ = y_

cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1])
)
tensorflow.matmul
37
import tensorflow as tf

target_label = sess.run(labels)
adv = attack_carlini.attack(input_data, target_label)

(logits_part_nor, logits_part_adv, labels_part) = sess.run(
    [logits_nor, logits_adv, tf.argmax(labels, 1)],
    feed_dict={adv_image: adv})

logits_all = np.concatenate((logits_all, logits_part_nor), axis=0)
tensorflow.argmax
38
import tensorflow as tf

             stddev=0.02, data_format='NDHWC'):
    with tf.variable_scope(name):
        assert (data_format == 'NDHWC')
        self.w = tf.get_variable('w', [k_t, k_h, k_w, input_dim, output_dim],
                                 initializer=tf.truncated_normal_initializer(stddev=stddev))
        self.b = tf.get_variable('b', [output_dim],
                                 initializer=tf.constant_initializer(0.0))
        self.strides = [1, 1, 1]
        self.dilates = [d_t, d_h, d_w]
tensorflow.truncated_normal_initializer
39
import tensorflow as tf

    loss = None
    with tf.name_scope(name, "softmax_loss", [output]):
        label_dis = labels / tf.reduce_sum(labels, 1, keep_dims=True)
        loss = tf.nn.softmax_cross_entropy_with_logits(
            logits=output, labels=label_dis) * tf.reduce_sum(labels, 1)
    return tf.reduce_sum(loss) / tf.reduce_sum(labels)

def get_normalized_weights(self, propensity):
tensorflow.reduce_sum
40
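The loss above normalizes each label row by its sum before the cross-entropy; a minimal sketch of that step (not from the dataset; TF 2.x spells the keyword keepdims, 1.x also accepted keep_dims):

import tensorflow as tf

labels = tf.constant([[1., 1., 0.],
                      [0., 2., 2.]])
row_sums = tf.reduce_sum(labels, 1, keepdims=True)  # shape [2, 1]
label_dis = labels / row_sums                       # each row now sums to 1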
import tensorflow as tf

print('===== Decompress =====')
# load model.
model = importlib.import_module(model)
synthesis_transform = model.SynthesisTransform(latent_points)
hyper_encoder = model.HyperEncoder()
hyper_decoder = model.HyperDecoder()
entropy_bottleneck = EntropyBottleneck()
conditional_entropy_model = SymmetricConditional()

checkpoint = tf.train.Checkpoint(synthesis_transform=synthesis_transform,
                                 hyper_encoder=hyper_encoder,
                                 hyper_decoder=hyper_decoder,
                                 estimator=entropy_bottleneck)
status = checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))

zs = entropy_bottleneck.decompress(z_strings, z_min_v, z_max_v, z_shape, z_shape[-1])
print("Entropy Decoder (Hyper)")

def loop_hyper_decoder(z):
    z = tf.expand_dims(z, 0)
    loc, scale = hyper_decoder(z)
    return tf.squeeze(loc, [0]), tf.squeeze(scale, [0])

locs, scales = tf.map_fn(loop_hyper_decoder, zs,
                         dtype=(tf.float32, tf.float32),
                         parallel_iterations=1, back_prop=False)
lower_bound = 1e-9  # TODO
tensorflow.train.latest_checkpoint
41
import tensorflow as tf

const_var = tf.Variable(tf.constant([8, 6, 7, 5, 3, 0, 9]))
const_fill_var = tf.Variable(tf.constant(-1, shape=[row_dim, col_dim]))

sess.run(const_var.initializer)
tensorflow.constant
42
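tf.constant with an explicit shape, as in const_fill_var above, broadcasts a scalar into a filled tensor. A minimal sketch (not from the dataset):

import tensorflow as tf

tf.constant(-1, shape=[2, 3])  # [[-1, -1, -1], [-1, -1, -1]]
tf.constant([8, 6, 7])         # shape inferred from the list: [3]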
import tensorflow as tf

def testStrWorksCorrectlyScalar(self):
    # Usually we'd write np.float(X) here, but a recent Eager bug would
    # erroneously coerce the value to float32 anyway. We therefore use constants
    # here, until the bug is resolved in TensorFlow 1.12.
    normal = tfd.Normal(loc=tf.constant(0, tf.float16),
                        scale=tf.constant(1, tf.float16))
    self.assertEqual(
        str(normal),
        "tfp.distributions.Normal("
tensorflow.constant
43
import tensorflow as tf

            '`rightmost_transposed_ndims` and `perm`.')
    if rightmost_transposed_ndims is not None:
        rightmost_transposed_ndims = tf.convert_to_tensor(
            value=rightmost_transposed_ndims,
            dtype=np.int32,
            name='rightmost_transposed_ndims')
        rightmost_transposed_ndims_ = tf.get_static_value(
            rightmost_transposed_ndims)
        with tf.control_dependencies(_maybe_validate_rightmost_transposed_ndims(
                rightmost_transposed_ndims, validate_args)):
            rightmost_transposed_ndims = tf.identity(rightmost_transposed_ndims)
        perm = tf.range(
            start=rightmost_transposed_ndims - 1,
            limit=-1,
            delta=-1,
            name='perm')
    else:  # perm is not None:
        perm = tf.convert_to_tensor(value=perm, dtype=np.int32, name='perm')
        rightmost_transposed_ndims = tf.size(
            input=perm, name='rightmost_transposed_ndims')
        rightmost_transposed_ndims_ = tf.get_static_value(
            rightmost_transposed_ndims)
tensorflow.range
44
import tensorflow as tf

def hgru_ops(self, i0, x, h2, layer, layer_idx):
    """hGRU body."""
    var_scope = '%s_hgru_weights' % layer
    # Circuit input receives recurrent output h2
    c1, g1 = self.circuit_input(
        h2=h2,
        layer=layer,
        var_scope=var_scope,
        layer_idx=layer_idx)
    with tf.variable_scope(
            '%s/c1_bn' % var_scope,
            reuse=self.scope_reuse) as scope:
        c1 = tf.contrib.layers.batch_norm(
            inputs=c1,
            scale=True,
            center=False,
            fused=True,
            renorm=False,
tensorflow.variable_scope
45
import tensorflow as tf

    act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable
        function to select an action given observation.
        See the top of the file for details.
    """
    if param_noise_filter_func is None:
        param_noise_filter_func = default_param_noise_filter

    with tf.variable_scope(scope, reuse=reuse):
        observations_ph = U.ensure_tf_input(make_obs_ph("observation"))
        stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
        update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
        update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
        update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
        reset_ph = tf.placeholder(tf.bool, (), name="reset")

        eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
        param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False)
        param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False)

        # Unmodified Q.
        q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
tensorflow.placeholder
46
import tensorflow as tf

        super(TweetSeqModel, self).__init__(batch_size, max_sequence_len,
                                            out_vocab_size, c2v,
                                            dropout_keep_prob)

        weights = tf.constant(weights, dtype=tf.float32, name='class_weights')

        def GetCell():
            """Creates an LSTM cell with dropout."""
            c = tf.nn.rnn_cell.LSTMCell(hidden_size,
                                        use_peepholes=model_params['peepholes'],
                                        num_proj=proj_size)
            if dropout_keep_prob is not None:
                c = tf.nn.rnn_cell.DropoutWrapper(c, input_keep_prob=dropout_keep_prob)
            return c

        # Create the bi-directional LSTM
        with tf.variable_scope('wordrnn'):
tensorflow.nn.rnn_cell.LSTMCell
47
import tensorflow as tf

        'image/object/bbox/xmax': tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/ymin': tf.io.VarLenFeature(tf.float32),
        'image/object/bbox/ymax': tf.io.VarLenFeature(tf.float32),
        'image/object/class/label': tf.io.VarLenFeature(tf.int64),
        'image/object/area': tf.io.VarLenFeature(tf.float32),
        'image/object/is_crowd': tf.io.VarLenFeature(tf.int64),
    }
    if include_mask:
        self._keys_to_features.update({
            'image/object/mask': tf.io.VarLenFeature(tf.string),
        })

def _decode_image(self, parsed_tensors):
    """Decodes the image and set its static shape."""
    image = tf.io.decode_image(parsed_tensors['image/encoded'], channels=3)
tensorflow.io.VarLenFeature
48
import tensorflow as tf

    total_loss = tf.reduce_sum(lm_loss * tgt_mask) / tf.reduce_sum(tgt_mask)
    monitor_dict["total_loss"] = total_loss

    return total_loss, new_mems, monitor_dict

def get_loss(FLAGS, features, labels, mems, is_training):
    """Pretraining loss with two-stream attention Transformer-XL."""
    if FLAGS.use_bfloat16:
        with tf.tpu.bfloat16_scope():
            return two_stream_loss(FLAGS, features, labels, mems, is_training)
    else:
        return two_stream_loss(FLAGS, features, labels, mems, is_training)

def get_classification_loss(FLAGS, features, n_class, is_training):
    """Loss for downstream classification tasks."""
tensorflow.tpu.bfloat16_scope
49
import tensorflow as tf

        raise Exception("The input dimension must be rank 2")

    n_in = int(self.inputs.get_shape()[-1])
    self.n_units = n_units

    with tf.variable_scope(name):
        W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init,
                            dtype=LayersConfig.tf_dtype, **W_init_args)
        b = tf.get_variable(name='b', shape=(n_units), initializer=b_init,
                            dtype=LayersConfig.tf_dtype, **b_init_args)
        # self.outputs = act(tf.matmul(self.inputs, W) + b)

        LayersConfig.set_keep[name] = tf.placeholder(tf.float32)
        W_dropcon = tf.nn.dropout(W, LayersConfig.set_keep[name])
        self.outputs = act(tf.matmul(self.inputs, W_dropcon) + b)

    # self.all_layers = list(layer.all_layers)
    # self.all_params = list(layer.all_params)
    # self.all_drop = dict(layer.all_drop)
    self.all_drop.update({LayersConfig.set_keep[name]: keep})
    self.all_layers.append(self.outputs)
    self.all_params.extend([W, b])
tensorflow.matmul
50
import tensorflow as tf

elif isinstance(ob_space, Box):
    input_shape = (batch_size,) + ob_space.shape
    input_x = tf.placeholder(shape=input_shape, dtype=ob_space.dtype, name=name)
    processed_x = tf.to_float(input_x)
    return input_x, processed_x
else:
tensorflow.to_float
51
import tensorflow as tf

        scope='BoxEncodingPredictor')
    if self._use_dropout:
        net = slim.dropout(net, keep_prob=self._dropout_keep_prob)
    class_predictions_with_background = slim.conv2d(
        net, num_predictions_per_location * num_class_slots,
        [self._kernel_size, self._kernel_size],
        scope='ClassPredictor',
        biases_initializer=tf.constant_initializer(
            self._class_prediction_bias_init))
    if self._apply_sigmoid_to_scores:
        class_predictions_with_background = tf.sigmoid(
            class_predictions_with_background)

combined_feature_map_shape = shape_utils.combined_static_and_dynamic_shape(
    image_features)
box_encodings = tf.reshape(
    box_encodings, tf.stack([combined_feature_map_shape[0],
                             combined_feature_map_shape[1] *
                             combined_feature_map_shape[2] *
tensorflow.sigmoid
52
import tensorflow as tf

# TODO: move to ops
def _rank(x):
    return len(x.get_shape())

def _apply_dropout_mask(tensor_shape, keep_prob=1.0, normalize=True):
    random_tensor = keep_prob + tf.random_uniform(tensor_shape, dtype=tf.float32)
    binary_mask = tf.floor(random_tensor)
    if normalize:
        binary_mask = tf.reciprocal(keep_prob) * binary_mask
    return binary_mask

def _global_keep_prob(keep_prob):
    keep_prob = tf.convert_to_tensor(keep_prob, dtype=tf.float32)
    keep_prob = tf.cond(_phase, lambda: keep_prob, lambda: keep_prob * 0.0 + 1.0)
    return keep_prob

def layer(func):

    class Layer(object):

        def __init__(self, *args, **kwargs):
            self.func = func
            self.args = args
            self.kwargs = kwargs
            self.name = self.kwargs.get("name", self.func.__name__)
tensorflow.convert_to_tensor
53
import tensorflow as tf

        input_props.append((tf.int32, [None]))  # Gold ends.
        input_props.append((tf.int32, [None]))  # Cluster ids.

        self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props]
        dtypes, shapes = zip(*input_props)
        queue = tf.PaddingFIFOQueue(capacity=10, dtypes=dtypes, shapes=shapes)
        self.enqueue_op = queue.enqueue(self.queue_input_tensors)
        self.input_tensors = queue.dequeue()

        self.predictions, self.loss = self.get_predictions_and_loss(*self.input_tensors)
        self.global_step = tf.Variable(0, name="global_step", trainable=False)
        self.reset_global_step = tf.assign(self.global_step, 0)
        learning_rate = tf.train.exponential_decay(self.config["learning_rate"], self.global_step,
                                                   self.config["decay_frequency"], self.config["decay_rate"],
                                                   staircase=True)
        trainable_params = tf.trainable_variables()
        gradients = tf.gradients(self.loss, trainable_params)
        gradients, _ = tf.clip_by_global_norm(gradients, self.config["max_gradient_norm"])
        optimizers = {
            "adam": tf.train.AdamOptimizer,
            "sgd": tf.train.GradientDescentOptimizer
        }
        optimizer = optimizers[self.config["optimizer"]](learning_rate)
        self.train_op = optimizer.apply_gradients(zip(gradients, trainable_params), global_step=self.global_step)

    def start_enqueue_thread(self, session):
        with open(self.config["train_path"]) as f:
            train_examples = [json.loads(jsonline) for jsonline in f.readlines()]

        def _enqueue_loop():
tensorflow.trainable_variables
54
import tensorflow as tf

def call(self, inputs):
    """Evaluates the QNode on input data using the initialized weights.

    Args:
        inputs (tensor): data to be processed

    Returns:
        tensor: output data
    """
    if len(tf.shape(inputs)) > 1:
        # If the input size is not 1-dimensional, unstack the input along its first dimension,
        # recursively call the forward pass on each of the yielded tensors, and then stack the
        # outputs back into the correct shape
        reconstructor = []
        for x in tf.unstack(inputs):
            reconstructor.append(self.call(x))
        return tf.stack(reconstructor)

    return self._evaluate_qnode(inputs)

def _evaluate_qnode(self, x):
    """Evaluates a QNode for a single input datapoint.

    Args:
        x (tensor): the datapoint

    Returns:
        tensor: output datapoint
tensorflow.unstack
55
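The call above relies on tf.unstack and tf.stack being inverses along the first axis; a minimal sketch (not from the dataset):

import tensorflow as tf

batch = tf.reshape(tf.range(6.), [3, 2])  # shape [3, 2]
rows = tf.unstack(batch)                  # list of 3 tensors, each shape [2]
rebuilt = tf.stack(rows)                  # back to shape [3, 2]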
import tensorflow as tf

    # Only make the ops if we know that `is_training=True`, or the value of
    # `is_training` is unknown.
    is_training_const = utils.constant_value(is_training)
    if is_training_const is None or is_training_const:
        update_mean_op, update_variance_op = utils.smart_cond(
            is_training,
            build_update_ops,
            build_no_ops,
        )

        # Every new connection creates a new op which adds its contribution
        # to the running average when ran.
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mean_op)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_variance_op)

def _build_update_ops_second_moment(self, mean, second_moment, is_training):
    """Builds the moving average update ops when using the moving second moment.

    Args:
        mean: The mean value to update with.
        second_moment: The second_moment value to update with.
        is_training: Boolean Tensor to indicate if we're currently in
            training mode.
    """

    def build_update_ops():
        """Builds the exponential moving average update ops."""
tensorflow.add_to_collection
56
import tensorflow as tf

    return_dict["end_top_index"] = end_top_index

    # an additional layer to predict answerability
    with tf.variable_scope("answer_class"):
        # get the representation of CLS
        cls_index = tf.one_hot(cls_index, seq_len, axis=-1, dtype=tf.float32)
        cls_feature = tf.einsum("lbh,bl->bh", output, cls_index)

        # get the representation of START
        start_p = tf.nn.softmax(start_logits_masked, axis=-1,
                                name="softmax_start")
        start_feature = tf.einsum("lbh,bl->bh", output, start_p)

        # note(zhiliny): no dependency on end_feature so that we can obtain
        # one single `cls_logits` for each sample
        ans_feature = tf.concat([start_feature, cls_feature], -1)
        ans_feature = tf.layers.dense(
            ans_feature,
            xlnet_config.d_model,
            activation=tf.tanh,
            kernel_initializer=initializer,
            name="dense_0")
        ans_feature = tf.layers.dropout(ans_feature, FLAGS.dropout,
                                        training=is_training)
        cls_logits = tf.layers.dense(
            ans_feature,
            1,
            kernel_initializer=initializer,
            name="dense_1",
            use_bias=False)
        cls_logits = tf.squeeze(cls_logits, -1)
tensorflow.concat
57
import tensorflow.contrib.graph_editor as ge

    d_xs_new = dv[len(checkpoints_other):]
    for j in range(len(xs)):
        if d_xs_new[j] is not None:
            if d_xs[j] is None:
                d_xs[j] = _unsparsify(d_xs_new[j])
            else:
                d_xs[j] += _unsparsify(d_xs_new[j])

    return d_xs

def tf_toposort(ts, within_ops=None):
    all_ops = ge.get_forward_walk_ops([x.op for x in ts], within_ops=within_ops)

    deps = {}
    for op in all_ops:
        for o in op.outputs:
            deps[o] = set(op.inputs)
    sorted_ts = toposort(deps)

    # only keep the tensors from our original list
    ts_sorted_lists = []
    for l in sorted_ts:
        keep = list(set(l).intersection(ts))
tensorflow.contrib.graph_editor.get_forward_walk_ops
58
import tensorflow as tf

def build_trainer(self, child_model):
    child_model.build_valid_rl()
    self.valid_acc = (tf.to_float(child_model.valid_shuffle_acc) /
                      tf.to_float(child_model.batch_size))
    self.reward = self.valid_acc

    if self.entropy_weight is not None:
        self.reward += self.entropy_weight * self.sample_entropy

    self.sample_log_prob = tf.reduce_sum(self.sample_log_prob)
    self.baseline = tf.Variable(0.0, dtype=tf.float32, trainable=False)
    baseline_update = tf.assign_sub(
        self.baseline, (1 - self.bl_dec) * (self.baseline - self.reward))

    with tf.control_dependencies([baseline_update]):
        self.reward = tf.identity(self.reward)

    self.loss = self.sample_log_prob * (self.reward - self.baseline)
    self.train_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="train_step")

    tf_variables = [var for var in tf.trainable_variables()
                    if var.name.startswith(self.name)]
    print("-" * 80)
tensorflow.assign_sub
59
import tensorflow as tf

def deconv2d(input_, output_shape,
             k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
             name="deconv2d", with_w=False):
    with tf.variable_scope(name):
        # filter : [height, width, output_channels, in_channels]
        w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
tensorflow.variable_scope
60
import tensorflow as tf

vz_keys = tf.reshape(tf.Variable([], collections=[], dtype=tf.string), (-1, 1))
x_t = tf.gather(x, l)
x_t_len = tf.strings.length(x_t)
x_t = tf.string_split([x_t], delimiter='').values
z_t = tf.gather(y, m)
z_t_len = tf.strings.length(z_t)
z_t = tf.string_split([z_t], delimiter='').values

for i in tf.range(start=0, limit=x_t_len - self._p + 1, delta=1, dtype=None, name='range'):
    u = tf.string_join(x_t[i:i + self._p], '')
    vx_keys, r = tf.cond(
        tf.greater(vx.lookup(u), -1),
        true_fn=lambda: (vx_keys, tf.add(vx.lookup(u), 1)),
        false_fn=lambda: (tf.concat([vx_keys, tf.reshape(u, (-1, 1))], axis=0),
                          tf.constant(1, dtype=tf.int64, name='constant'))
    )
    vx.insert(u, r)

for i in tf.range(start=0, limit=z_t_len - self._p + 1, delta=1, dtype=None, name='range'):
    u = tf.string_join(z_t[i:i + self._p], '')
tensorflow.string_join
61
import tensorflow as tf

# now to upscale to actual image size
deconv_shape1 = image_net["pool4"].get_shape()
W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name="W_t1")
b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"]))
fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")

deconv_shape2 = image_net["pool3"].get_shape()
W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
tensorflow.shape
62
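The output_shape=tf.shape(...) argument above uses the dynamic shape, which matters when dimensions are None at graph-construction time; a minimal TF 1.x sketch of the distinction (not from the dataset):

import tensorflow as tf  # TF 1.x graph mode assumed

x = tf.placeholder(tf.float32, [None, None, 3])
x.get_shape()  # static: TensorShape([None, None, 3]), fixed at graph time
tf.shape(x)    # dynamic: an int32 tensor evaluated at run time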
from tensorflow.python.framework import ops

        name or 'root_mean_squared_error')

root_mean_squared_error = math_ops.sqrt(value_tensor)
with ops.control_dependencies([update_op]):
    update_op = math_ops.sqrt(update_op)
tensorflow.python.framework.ops.control_dependencies
63
import tensorflow as tf

TRAIN_STEPS = 1

CONFIG = tf.ConfigProto(device_count={"GPU": 0})

class UnidirectionalSequenceLstmTest(test_util.TensorFlowTestCase):

    def setUp(self):
        tf.reset_default_graph()
        # Import MNIST dataset
        self.mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

        # Define constants
        # Unrolled through 28 time steps
        self.time_steps = 28
        # Rows of 28 pixels
tensorflow.reset_default_graph
64
import tensorflow as tf

# Date: 2018/12/22 4:06 PM

# 10.3 Concurrent execution in TensorFlow
# 1. To find out which device each TensorFlow operation is placed on, pass a config
#    argument with log_device_placement set to True when creating the session. When
#    the script is run from the command line, the device placement is printed.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

# note: tf.constant_initializer does not accept shape/name arguments; tf.constant
# (as used for b) does, so this mined line would raise a TypeError as written
a = tf.constant_initializer([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Runs the op.
print(sess.run(c))

# 2. Run the following command from the console
tensorflow.constant_initializer
65
import tensorflow as tf

param_eta = tf.placeholder(dtype=tf.float32, shape=[], name="param_eta")
tensorflow.placeholder
66
import tensorflow as tf

tvars = tf.trainable_variables()

initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
    (assignment_map, initialized_variable_names
     ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
    if use_tpu:

        def tpu_scaffold():
            tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
            return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
    else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

tf.logging.info("**** Trainable Variables ****")
for var in tvars:
    init_string = ""
tensorflow.train.init_from_checkpoint
67
import tensorflow as tf

# Creates a graph.
v0 = tf.Variable(0.0)
var = tf.Variable(10.0)
tf.add(v0, var)

@function.Defun(x=tf.float32)
def minus_one(x):
    return x - 1

minus_one(tf.identity(v0))
save = tf.train.Saver({"v0": v0})
tf.initialize_all_variables()

# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph()
ops = [o.name for o in meta_graph_def.meta_info_def.stripped_op_list.op]
tensorflow.identity
68
import tensorflow as tf

            tf.OpError, lambda e: "uninitialized value v0" in e.message):
        sess.run(v0)
    with self.assertRaisesWithPredicateMatch(
            tf.OpError, lambda e: "uninitialized value v1" in e.message):
        sess.run(v1)

    # Restore the saved values in the parameter nodes.
    save.restore(sess, save_path)
    # Check that the parameter nodes have been restored.
    self.assertEqual(10.0, v0.eval())
    self.assertEqual(20.0, v1.eval())

# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.test_session() as sess:
    v0_2 = tf.Variable(1000.0, name="v0")
    v1_2 = tf.Variable(2000.0, name="v1")
    save2 = tf.train.Saver({"v0": v0_2, "v1": v1_2})
    tf.initialize_all_variables().run()

    # Check that the parameter nodes have been initialized.
    self.assertEqual(1000.0, v0_2.eval())
    self.assertEqual(2000.0, v1_2.eval())
    # Restore the values saved earlier in the parameter nodes.
    save2.restore(sess, save_path)
    # Check that the parameter nodes have been restored.
    self.assertEqual(10.0, v0_2.eval())
    self.assertEqual(20.0, v1_2.eval())

def testInt64(self):
tensorflow.Variable
69
import tensorflow as tf

def to_ids(example):
    sentence = tf.reshape(example['tokens'], shape=[1])
    words = tf.strings.split(sentence, sep=' ').values
    truncated_words = words[:max_seq_len]
tensorflow.strings.split
70
import tensorflow as tf

    tf.summary.scalar('cross_entropy_loss', cross_entropy)

    loc_loss = tf.cond(n_positives > 0.,
                       lambda: modified_smooth_l1(location_pred, tf.stop_gradient(gtargets), sigma=1.),
                       lambda: tf.zeros_like(location_pred))
    #loc_loss = modified_smooth_l1(location_pred, tf.stop_gradient(gtargets))
    loc_loss = tf.reduce_mean(tf.reduce_sum(loc_loss, axis=-1))
    loc_loss = tf.identity(loc_loss, name='location_loss')
    tf.summary.scalar('location_loss', loc_loss)
    tf.losses.add_loss(loc_loss)

    # Add weight decay to the loss. We exclude the batch norm variables because
    # doing so leads to a small improvement in accuracy.
    loss = cross_entropy + loc_loss + params['weight_decay'] * tf.add_n(
        [tf.nn.l2_loss(v) for v in tf.trainable_variables()
         if 'batch_normalization' not in v.name])
    total_loss = tf.identity(loss, name='total_loss')

    if mode == tf.estimator.ModeKeys.TRAIN:
        global_step = tf.train.get_or_create_global_step()
        lr_values = [params['learning_rate'] * decay for decay in params['lr_decay_factors']]
        learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
                                                    [int(_) for _ in params['decay_boundaries']],
                                                    lr_values)
        truncated_learning_rate = tf.maximum(learning_rate,
                                             tf.constant(params['end_learning_rate'], dtype=learning_rate.dtype))
tensorflow.nn.l2_loss
71
import tensorflow as tf

        return model_utils.update_exponential_moving_average(
            rl_step_baseline, momentum=rl_baseline_momentum)

    rl_baseline = update_rl_baseline()

    rl_advantage = rl_reward - rl_baseline
    rl_empirical_loss = -tf.stop_gradient(rl_advantage) * log_prob

    rl_entropy_loss = -rl_entropy_regularization * rl_entropy

    enable_rl_optimizer = tf.cast(
        tf.greater_equal(target_global_step, FLAGS.first_pretrain_steps),
        tf.float32)
    rl_learning_rate = FLAGS.rl_learning_rate * enable_rl_optimizer
    rl_learning_rate = tf.train.piecewise_constant(
        target_global_step, [800,],
        [rl_learning_rate, rl_learning_rate * 0.1])

    optimizer = tf.train.AdamOptimizer(rl_learning_rate)
    target_train_op = optimizer.minimize(
        rl_empirical_loss,
        target_global_step,
tensorflow.greater_equal
72
import tensorflow as tf

    decided to use the (x_min, y_min, x_max, y_max), forcing us to switch to
    TensorFlow's every time we want to use a std function that handles
    bounding boxes.

    Args:
        bboxes: A Tensor of shape (total_bboxes, 4)

    Returns:
        bboxes: A Tensor of shape (total_bboxes, 4) with the order swapped.
    """
    with tf.name_scope('BoundingBoxTransform/change_order'):
        first_min, second_min, first_max, second_max = tf.unstack(
            bboxes, axis=1
        )
        bboxes = tf.stack(
            [second_min, first_min, second_max, first_max], axis=1
        )
        return bboxes
tensorflow.name_scope
73
import tensorflow as tf

    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    # batch norm updates
    with tf.control_dependencies([variables_averages_op, apply_gradient_op, batch_norm_updates_op]):
        train_op = tf.no_op(name='train_op')

    saver = tf.train.Saver(tf.global_variables())
    summary_writer = tf.summary.FileWriter(FLAGS.checkpoint_path, tf.get_default_graph())

    init = tf.global_variables_initializer()

    if FLAGS.pretrained_model_path is not None:
        variable_restore_op = slim.assign_from_checkpoint_fn(FLAGS.pretrained_model_path,
                                                             slim.get_trainable_variables(),
                                                             ignore_missing_vars=True)
    config = tf.ConfigProto()
    custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
    custom_op.name = "NpuOptimizer"
    custom_op.parameter_map["use_off_line"].b = True  # run training on the Ascend AI processor
    config.graph_options.rewrite_options.remapping = RewriterConfig.OFF  # disable the remap pass
    if FLAGS.allow_mix_precision:
        custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
    if FLAGS.auto_tune:
        custom_op.parameter_map["auto_tune_mode"].s = tf.compat.as_bytes("RL,GA")

    with tf.Session(config=config) as sess:
        if FLAGS.restore:
            print('continue training from previous checkpoint')
            ckpt = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
tensorflow.ConfigProto
74
import tensorflow as tf

input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images')
tensorflow.placeholder
75
import tensorflow as tf

(assignment_map, initialized_variable_names
 ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:

    def tpu_scaffold():
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        return tf.train.Scaffold()

    scaffold_fn = tpu_scaffold
else:
    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

tf.logging.info("**** Trainable Variables ****")
for var in tvars:
    init_string = ""
    if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
    tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape, init_string)
tensorflow.train.init_from_checkpoint
76
import tensorflow as tf

    if coeff <= 0:
        return y
    x = np.zeros(y.shape[0], dtype=np.float32)
    x[0] = y[0]
    for n in range(1, y.shape[0], 1):
        x[n] = coeff * x[n - 1] + y[n]
    return x

def read_and_decode(filename_queue, canvas_size, preemph=0.):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'wav_raw': tf.FixedLenFeature([], tf.string),
            'noisy_raw': tf.FixedLenFeature([], tf.string),
        })
    wave = tf.decode_raw(features['wav_raw'], tf.int32)
tensorflow.TFRecordReader
77
import tensorflow as tf

        tf.equal(phase, 'train'),
        datasets.train.get_next,
        datasets.test.get_next)
    if not isinstance(data, dict):
        data = {'data': data}
    if 'length' not in data:
        example = data[list(data.keys())[0]]
        data['length'] = (
            tf.zeros((tf.shape(example)[0],), tf.int32) + tf.shape(example)[1])
    return data

def train(model_fn, datasets, logdir, config):
    """Train a model on a dataset. The model function receives the following
    arguments: data batch, trainer
tensorflow.shape
78
import tensorflow as tf

        resnet_size=18,
        num_classes=class_num,
        mode='se',
        data_format=None)
    inputs = network(inputs=inputs, is_training=training)
    feat = tf.nn.l2_normalize(inputs, 1, 1e-10, name='feat')
    inputs = tf.layers.dense(inputs=inputs, units=class_num)
    # inputs = tf.layers.dense(inputs=feat, units=class_num)
    inputs = tf.identity(inputs, 'final_dense')

    return inputs, feat

# image_size = 32, img_channels = 3, class_num = 10 in cifar10
x = tf.placeholder(tf.float32, shape=[None, image_size, image_size, img_channels])
label = tf.placeholder(tf.float32, shape=[None,])
one_hot_labels = tf.one_hot(indices=tf.cast(label, tf.int32), depth=class_num)

training_flag = tf.placeholder(tf.bool)
learning_rate = tf.placeholder(tf.float32, name='learning_rate')

logits, feat = resnet_model_fn(x, training=training_flag)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_labels, logits=logits))
tensorflow.placeholder
79
import tensorflow as tf

# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops

def conv(inpOp, nIn, nOut, kH, kW, dH, dW, padType, name, phase_train=True,
         use_batch_norm=True, weight_decay=0.0):
    with tf.variable_scope(name):
        l2_regularizer = lambda t: l2_loss(t, weight=weight_decay)
        kernel = tf.get_variable("weights", [kH, kW, nIn, nOut],
                                 initializer=tf.truncated_normal_initializer(stddev=1e-1),
                                 regularizer=l2_regularizer,
                                 dtype=inpOp.dtype)
        cnv = tf.nn.conv2d(inpOp, kernel, [1, dH, dW, 1], padding=padType)

        if use_batch_norm:
            conv_bn = batch_norm(cnv, phase_train)
        else:
            conv_bn = cnv
        biases = tf.get_variable("biases", [nOut], initializer=tf.constant_initializer(),
                                 dtype=inpOp.dtype)
        bias = tf.nn.bias_add(conv_bn, biases)
        conv1 = tf.nn.relu(bias)
    return conv1
tensorflow.variable_scope
80
import tensorflow as tf

tf.app.flags.DEFINE_string('eval_data_path', '', 'Filepattern for eval data')
tf.app.flags.DEFINE_string('train_dir', '', 'Directory to keep training outputs.')
tf.app.flags.DEFINE_string('eval_dir', '', 'Directory to keep eval outputs.')
tf.app.flags.DEFINE_integer('eval_batch_count', 10, 'Number of batches to eval.')
tf.app.flags.DEFINE_bool('eval_once', False, 'Whether evaluate the model only once.')
tf.app.flags.DEFINE_string('log_root', '',
                           'Directory to keep the checkpoints. Should be a '
                           'parent directory of FLAGS.train_dir/eval_dir.')
tf.app.flags.DEFINE_integer('num_gpus', 0, 'Number of gpus used for training. (0 or 1)')
tf.app.flags.DEFINE_integer('num_residual_units', 5,
tensorflow.app.flags.DEFINE_bool
81
import tensorflow as tf

    l3 = tf.matmul(l2, self.w3) + self.b3
    l3 = tf.nn.relu(l3)
    out = tf.matmul(l3, self.w4) + self.b4
    return out

def valid_inference(self, images):
    images = tf.cast(images, tf.float32) / 255.0
    l1 = tf.matmul(images, self.w1) + self.b1
    l1 = tf.nn.relu(l1)
    l2 = tf.matmul(l1, self.w2) + self.b2
    l2 = tf.nn.relu(l2)
    l3 = tf.matmul(l2, self.w3) + self.b3
    l3 = tf.nn.relu(l3)
    out = tf.matmul(l3, self.w4) + self.b4
    return out

def softmax_loss(self, predicts, labels):
    predicts = tf.nn.softmax(predicts)
    labels = tf.one_hot(labels, classnum)
    loss = -tf.reduce_sum(labels * tf.log(predicts))
    return loss
tensorflow.matmul
82
import tensorflow as tf

row_norms = tf.sqrt(tf.reduce_sum(tf.square(var_matrix), 1))
scaling = maxnorm / tf.maximum(row_norms, maxnorm)
tensorflow.maximum
83
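The two lines above implement max-norm scaling: tf.maximum keeps the divisor at least maxnorm, so rows already under the cap get a scaling factor of 1.0. A worked sketch (not from the dataset):

import tensorflow as tf

maxnorm = 1.0
var_matrix = tf.constant([[3., 4.],     # row norm 5.0 -> scaled down by 0.2
                          [0.3, 0.4]])  # row norm 0.5 -> left unchanged
row_norms = tf.sqrt(tf.reduce_sum(tf.square(var_matrix), 1))  # [5.0, 0.5]
scaling = maxnorm / tf.maximum(row_norms, maxnorm)            # [0.2, 1.0]
clipped = var_matrix * tf.expand_dims(scaling, 1)             # every row norm <= 1.0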
from tensorflow.python.framework import ops """ with ops.op_scope([value, bias], name, "BiasAddV1") as name: value = ops.convert_to_tensor(value, name="input") bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias") return gen_nn_ops._bias_add_v1(value, bias, name=name) ops.RegisterShape("BiasAddV1")(common_shapes.bias_add_shape) ops.RegisterShape("BiasAddGradV1")(common_shapes.bias_add_grad_shape) def relu6(features, name=None): """Computes Rectified Linear 6: `min(max(features, 0), 6)`. Args: features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`, `int16`, or `int8`.
tensorflow.python.framework.ops.RegisterShape
84
from tensorflow.python.ops import math_ops

    # Create slots for the global solution.
    for v in var_list:
        self._zeros_slot(v, "vstar", self._name)
        self._zeros_slot(v, "gold", self._name)

def _apply_dense(self, grad, var):
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    mu_t = math_ops.cast(self._mu_t, var.dtype.base_dtype)

    vstar = self.get_slot(var, "vstar")
    gold = self.get_slot(var, "gold")
tensorflow.python.ops.math_ops.cast
85
import tensorflow as tf

        if modality.top_is_pointwise:
            return tf.squeeze(logits, axis=[1, 2, 3])
        # -1 due to the pad above.
        current_output_position = common_layers.shape_list(ids)[1] - 1
        logits = logits[:, current_output_position, :, :]
        return tf.squeeze(logits, axis=[1, 2])

    initial_ids = tf.zeros([batch_size], dtype=tf.int32)

    if self.has_input:
tensorflow.squeeze
86
import tensorflow as tf

x = tf.transpose(x, perm=[3, 1, 2, 0])  # (batch_size, num_nodes, input_size, order)
x = tf.reshape(x, shape=[batch_size * self._num_nodes, input_size * num_matrices])

weights = tf.get_variable(
    'weights', [input_size * num_matrices, output_size],
    dtype=dtype,
    initializer=tf.contrib.layers.xavier_initializer())
x = tf.matmul(x, weights)  # (batch_size * self._num_nodes, output_size)

biases = tf.get_variable("biases", [output_size],
                         dtype=dtype,
                         initializer=tf.constant_initializer(bias_start, dtype=dtype))
x = tf.nn.bias_add(x, biases)
# Reshape res back to: (batch_size, num_node, state_dim)
return tf.reshape(x, [batch_size, self._num_nodes, output_size])
tensorflow.constant_initializer
87
import tensorflow as tf

      print('logit: {}'.format(logits.get_shape()))

      # Compute loss
      if mode != 'gen':
        neg_log_lhoods = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=targets)
        if target_weight_strategy == 'rect':
          avg_neg_log_lhood = tf.reduce_mean(neg_log_lhoods)
        else:
          neg_log_lhoods = tf.multiply(neg_log_lhoods, target_weights)
          # be careful to have at least one weight be nonzero
          # should we be taking the mean elem-wise by batch? i think this is a big bug
          avg_neg_log_lhood = tf.reduce_sum(neg_log_lhoods) / tf.reduce_sum(target_weights)
        neg_log_lhoods_inspect = tf.reshape(neg_log_lhoods, [batch_size, rnn_nunroll])

      # Train op
      if mode == 'train':
        lr = tf.Variable(0.0, trainable=False)
        self._lr = lr
        self._lr_summary = tf.summary.scalar('learning_rate', self._lr)

        tvars = tf.trainable_variables()
        grads = tf.gradients(avg_neg_log_lhood, tvars)
tensorflow.reduce_sum
88
import tensorflow as tf

    for i in range(dim):
        dg_i = tf.gradients(flat_grads[i], par)  # for each element of grads evaluate the gradients
        dg_i_flat = flatten(dg_i)  # flatten the resulting hessian onto a 1d array
tensorflow.gradients
89
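The loop in this seed builds a Hessian one row at a time by differentiating each element of the flattened gradient. A minimal runnable sketch of the same idea; the `flatten` helper is an assumption (taken to concatenate a list of tensors into one 1-D tensor):

import tensorflow as tf

def flatten(tensors):
    # Concatenate a list of tensors into a single 1-D tensor.
    return tf.concat([tf.reshape(t, [-1]) for t in tensors], axis=0)

x = tf.Variable([1.0, 2.0])
loss = tf.reduce_sum(x ** 3)
par = [x]
flat_grads = flatten(tf.gradients(loss, par))  # dloss/dx, shape [2]
dim = 2
rows = []
for i in range(dim):
    dg_i = tf.gradients(flat_grads[i], par)  # gradient of one gradient element
    rows.append(flatten(dg_i))
hessian = tf.stack(rows)  # [[6., 0.], [0., 12.]] at x = [1., 2.]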
import tensorflow as tf

        return choices

      samples = multinomial_squeeze(logits, self.hparams.sampling_temp)
      return samples, logits, losses

  def _shard_features(self, features):  # pylint: disable=missing-docstring
    sharded_features = dict()
    for k, v in six.iteritems(features):
      v = tf.convert_to_tensor(v)
      v_shape = common_layers.shape_list(v)
      if not v_shape:
        v = tf.expand_dims(v, axis=-1)
        v_shape = [1]
      if v_shape == [1]:
        v = tf.tile(v, [self._num_datashards])
      sharded_features[k] = self._data_parallelism(
          tf.identity, tf.split(v, self._num_datashards, 0))
    return sharded_features

  def _to_features_per_datashard(self, features):
    datashard_features = []
tensorflow.expand_dims
90
import tensorflow as tf

        random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0,
                                           maxval=n_actions, dtype=tf.int64)
        chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0,
                                         maxval=1, dtype=tf.float32) < eps
        perturbed_stochastic_actions = tf.where(chose_random, random_actions,
                                                perturbed_deterministic_actions)
        stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)

        perturbed_output_actions = tf.cond(stochastic_ph, lambda: perturbed_stochastic_actions,
                                           lambda: deterministic_actions)
        output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions,
                                 lambda: deterministic_actions)
        update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph,
                                             lambda: eps))
        updates = [
            update_eps_expr,
            tf.cond(reset_ph,
                    lambda: perturb_vars(original_scope="model",
                                         perturbed_scope="perturbed_model/model"),
                    lambda: tf.group(*[])),
            tf.cond(update_param_noise_scale_ph, lambda: update_scale(),
                    lambda: tf.Variable(0., trainable=False)),
            update_param_noise_thres_expr,
        ]
tensorflow.cond
91
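This seed implements epsilon-greedy action selection alongside parameter-noise bookkeeping. A stripped-down sketch of just the epsilon-greedy part; all names and shapes here are illustrative, not from the seed's codebase:

import tensorflow as tf

batch_size, n_actions, eps = 4, 6, 0.1
q_values = tf.random_normal([batch_size, n_actions])
deterministic_actions = tf.argmax(q_values, axis=1)
random_actions = tf.random_uniform([batch_size], minval=0, maxval=n_actions,
                                   dtype=tf.int64)
# With probability eps take a uniformly random action, otherwise the greedy one.
chose_random = tf.random_uniform([batch_size], minval=0, maxval=1,
                                 dtype=tf.float32) < eps
actions = tf.where(chose_random, random_actions, deterministic_actions)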
import tensorflow as tf

    return tf.clip_by_value(combined_idx, 0, 9)

  def get_slow_antecedent_scores(self, top_span_emb, top_antecedents, top_antecedent_emb,
                                 top_antecedent_offsets, top_span_speaker_ids, genre_emb):
    k = util.shape(top_span_emb, 0)
    c = util.shape(top_antecedents, 1)

    feature_emb_list = []

    if self.config["use_metadata"]:
      top_antecedent_speaker_ids = tf.gather(top_span_speaker_ids, top_antecedents)  # [k, c]
      same_speaker = tf.equal(tf.expand_dims(top_span_speaker_ids, 1),
                              top_antecedent_speaker_ids)  # [k, c]
      speaker_pair_emb = tf.gather(tf.get_variable("same_speaker_emb",
                                                   [2, self.config["feature_size"]]),
                                   tf.to_int32(same_speaker))  # [k, c, emb]
      feature_emb_list.append(speaker_pair_emb)

      tiled_genre_emb = tf.tile(tf.expand_dims(tf.expand_dims(genre_emb, 0), 0),
                                [k, c, 1])  # [k, c, emb]
      feature_emb_list.append(tiled_genre_emb)

    if self.config["use_features"]:
      antecedent_distance_buckets = self.bucket_distance(top_antecedent_offsets)  # [k, c]
      antecedent_distance_emb = tf.gather(tf.get_variable("antecedent_distance_emb",
                                                          [10, self.config["feature_size"]]),
                                          antecedent_distance_buckets)  # [k, c, emb]
      feature_emb_list.append(antecedent_distance_emb)
tensorflow.get_variable
92
import tensorflow as tf

    if data_format_ == 'NHWC':
        inputs = tf.transpose(inputs, [0, 2, 3, 1])

    ksize = int(6 * sigma + 1.)
    x = tf.expand_dims(tf.range(ksize, delta=1, dtype=tf.float32), axis=1)
    y = tf.transpose(x, [1, 0])
    kernel_matrix = tf.exp(-((x - ksize / 2.) ** 2 + (y - ksize / 2.) ** 2) / (2 * sigma ** 2))
    #print(kernel_matrix)
    kernel_filter = tf.reshape(kernel_matrix, [ksize, ksize, 1, 1])
    kernel_filter = tf.tile(kernel_filter, [1, 1, inputs_filters, 1])
    #kernel_filter = tf.transpose(kernel_filter, [1, 0, 2, 3])
    outputs = tf.nn.depthwise_conv2d(inputs, kernel_filter, strides=[1, 1, 1, 1],
                                     padding='SAME', data_format=data_format_, name='blur')
    if data_format_ == 'NHWC':
        outputs = tf.transpose(outputs, [0, 3, 1, 2])
    return outputs

cpn_backbone = cpn.cascaded_pyramid_net
if 'seresnext50' in FLAGS.backbone:
    cpn_backbone = cpn.xt_cascaded_pyramid_net

def keypoint_model_fn(features, labels, mode, params):
tensorflow.nn.depthwise_conv2d
93
import tensorflow as tf

        start_x = tf.random.uniform(shape=(1,), minval=0, maxval=im_width, dtype=tf.int32)
        start_y = tf.random.uniform(shape=(1,), minval=0, maxval=im_height, dtype=tf.int32)

        mask = tf.pad(mask, [[cutout_size + start_y[0], im_height - start_y[0]],
                             [cutout_size + start_x[0], im_width - start_x[0]]])
        mask = mask[cutout_size: cutout_size + im_height,
                    cutout_size: cutout_size + im_width]
        mask = tf.tile(tf.reshape(mask, (im_height, im_width, 1)), (1, 1, 3))
        image = tf.where(tf.equal(mask, 0), x=image, y=tf.zeros_like(image))

        return image

    def _add_drop_path(self, X, keep_prob):
        with tf.variable_scope('drop_path'):
tensorflow.reshape
94
import tensorflow as tf

    def regularization(self):
        return self.model_lam * (
            self.model_prob * tf.reduce_sum(tf.square(self.model_W)) +
            tf.reduce_sum(tf.square(self.model_b))
tensorflow.square
95
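This looks like the weight regulariser from MC-dropout-style models, where the L2 penalty on the weights is scaled by the keep probability while the bias is penalised unscaled. A standalone sketch; the shapes and hyperparameter values are assumptions, only the variable names mirror the seed:

import tensorflow as tf

model_prob = 0.9   # assumed dropout keep probability
model_lam = 1e-2   # assumed regularisation strength
model_W = tf.Variable(tf.random_normal([64, 10]))
model_b = tf.Variable(tf.zeros([10]))
# Keep-probability-scaled L2 on the weights, plain L2 on the bias.
reg = model_lam * (model_prob * tf.reduce_sum(tf.square(model_W))
                   + tf.reduce_sum(tf.square(model_b)))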
import tensorflow.contrib as contrib

    dropout3_1 = contrib.layers.dropout(stitch3_1, keep_prob=keep_prob,
                                        is_training=is_training, scope="dropout3_1")
    dropout3_2 = contrib.layers.dropout(stitch3_2, keep_prob=keep_prob,
                                        is_training=is_training, scope="dropout3_2")
tensorflow.contrib.layers.dropout
96
import tensorflow as tf

    """Step the batch of environments.

    The results of the step can be accessed from the variables defined below.

    Args:
      action: Tensor holding the batch of actions to apply.

    Returns:
      Operation.
    """
    with tf.name_scope('environment/simulate'):
      if action.dtype in (tf.float16, tf.float32, tf.float64):
        action = tf.check_numerics(action, 'action')
      observ_dtype = self._parse_dtype(self._batch_env.observation_space)
      observ, reward, done = tf.py_func(
          lambda a: self._batch_env.step(a)[:3], [action],
          [observ_dtype, tf.float32, tf.bool], name='step')
      observ = tf.check_numerics(observ, 'observ')
      reward = tf.check_numerics(reward, 'reward')
      return tf.group(
tensorflow.name_scope
97
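The core trick in this seed is tf.py_func, which wraps an arbitrary Python callable as a graph op so an external simulator can run inside a session step. A minimal standalone example:

import numpy as np
import tensorflow as tf

def step(a):
    # Plain Python/NumPy code, executed at session run time.
    return (a * 2.0).astype(np.float32)

action = tf.constant([1.0, 2.0])
doubled = tf.py_func(step, [action], tf.float32, name='step')

with tf.Session() as sess:
    print(sess.run(doubled))  # [2. 4.]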
import tensorflow as tf

        if not white:
            q_mu = tf.matrix_triangular_solve(Luu, q_mu, lower=True)
            Luu_tiled = tf.tile(Luu[None, :, :], [num_func, 1, 1])  # remove line once issue 216 is fixed
            q_sqrt_r = tf.matrix_triangular_solve(Luu_tiled, q_sqrt_r, lower=True)

        Li_eKuf = tf.matrix_triangular_solve(Luu, eKuf, lower=True)  # M x N
        fmean = tf.matmul(Li_eKuf, q_mu, transpose_a=True)

        eKff = expectation(pXnew, kern)  # N (psi0)
        eKuffu = expectation(pXnew, (kern, feat), (kern, feat))  # N x M x M (psi2)
        Luu_tiled = tf.tile(Luu[None, :, :], [num_data, 1, 1])  # remove this line, once issue 216 is fixed
tensorflow.matrix_triangular_solve
98
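tf.matrix_triangular_solve performs forward substitution when lower=True. A small worked example, independent of the GP code in the seed:

import tensorflow as tf

# Solve L x = b for x, where L is lower triangular.
L = tf.constant([[2.0, 0.0],
                 [1.0, 3.0]])
b = tf.constant([[4.0],
                 [7.0]])
x = tf.matrix_triangular_solve(L, b, lower=True)
# Forward substitution: x[0] = 4/2 = 2, x[1] = (7 - 1*2)/3 = 5/3.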
import tensorflow as tf

      d *= noise
    z = tf.layers.dense(d, final_filters, name="unbottleneck")
    return layer + z, 0.0
tensorflow.layers.dense
99