Columns (name, type, value range):
  project         string, length 1-98
  commit_sha      string, length 40
  parent_sha      string, length 40
  file_path       string, length 4-209
  project_url     string, length 23-132
  likely_bug      bool, 1 class
  comodified      bool, 1 class
  in_function     bool, 2 classes
  diff            string, length 27-9.71k
  before          string, length 1-8.91k
  after           string, length 1-6k
  sstub_pattern   string (categorical), 23 values
  edit_script     string, length 33-158k
  key             string, length 45-154
  commit_message  string, length 3-65.5k
  files           list
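Each record below lists its values in the column order above, one field per line. A minimal sketch of how a single record could be held in Python; the container class is hypothetical and the per-field descriptions are inferred from the values shown in the preview:

```python
from dataclasses import dataclass
from typing import Any, List

@dataclass
class BugFixRecord:
    """Hypothetical container for one row of the preview below."""
    project: str          # repository name, e.g. "tensor2tensor"
    commit_sha: str       # 40-character SHA of the fixing commit
    parent_sha: str       # 40-character SHA of the parent (pre-fix) commit
    file_path: str        # path of the changed file within the repository
    project_url: str      # URL of the mined repository
    likely_bug: bool      # whether the change looks like a bug fix
    comodified: bool      # whether other code changed alongside this statement
    in_function: bool     # whether the change sits inside a function body
    diff: str             # unified diff hunk of the change
    before: str           # tokenized statement before the fix
    after: str            # tokenized statement after the fix
    sstub_pattern: str    # pattern label, e.g. "WRONG_FUNCTION_NAME"
    edit_script: str      # JSON tree-edit script (Update/Insert/Move/Delete ops)
    key: str              # "<owner>/<repo>@<commit_sha>"
    commit_message: str   # message of the fixing commit
    files: List[Any]      # GitHub file metadata for the commit, including the patch
```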
tensor2tensor
dd0e103c5a59dbf97531483e0ed090414bb7ed77
b94f2e17339dd24067a03071848151cc4c23df13
tensor2tensor/bin/t2t_decoder.py
https://github.com/medicode/tensor2tensor
true
false
false
@@ -35,7 +35,7 @@ import os # Fathom import fathomt2t -from fathomt2t.common_flags import setup_datset_flag, dataset_to_t2t_mode +from fathomt2t.common_flags import setup_dataset_flag, dataset_to_t2t_mode from fathomairflow.dags.dag_management.xcom_manipulation import echo_yaml_for_xcom_ingest # Dependency imports
from fathomt2t . common_flags import setup_datset_flag , dataset_to_t2t_mode
from fathomt2t . common_flags import setup_dataset_flag , dataset_to_t2t_mode
CHANGE_IDENTIFIER_USED
[["Update", ["identifier:setup_datset_flag", 3, 36, 3, 53], "setup_dataset_flag"]]
medicode/tensor2tensor@dd0e103c5a59dbf97531483e0ed090414bb7ed77
fix
[ { "sha": "28936f5c925d6eaf772ef11abdcec78fac08effc", "filename": "tensor2tensor/bin/t2t_decoder.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/dd0e103c5a59dbf97531483e0ed090414bb7ed77/tensor2tensor%2Fbin%2Ft2t_decoder.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/dd0e103c5a59dbf97531483e0ed090414bb7ed77/tensor2tensor%2Fbin%2Ft2t_decoder.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fbin%2Ft2t_decoder.py?ref=dd0e103c5a59dbf97531483e0ed090414bb7ed77", "patch": "@@ -35,7 +35,7 @@\n \n # Fathom\n import fathomt2t\n-from fathomt2t.common_flags import setup_datset_flag, dataset_to_t2t_mode\n+from fathomt2t.common_flags import setup_dataset_flag, dataset_to_t2t_mode\n from fathomairflow.dags.dag_management.xcom_manipulation import echo_yaml_for_xcom_ingest\n \n # Dependency imports" } ]
tensor2tensor
b7b448da238e2dcc6755b8d7caecf8f6bf93b9b8
5124b5178cc8794bc6e9080e2c4bb262a797d236
tensor2tensor/models/distillation.py
https://github.com/medicode/tensor2tensor
true
false
false
@@ -28,7 +28,7 @@ import tensorflow as tf - First, a teacher is train on a task; Second, a student is trained to perform + First, a teacher is trained on a task; Second, a student is trained to perform the task while matching the teacher's softened outputs. For more details, see the paper below.
First , a teacher is train
First , a teacher is trained
CHANGE_IDENTIFIER_USED
[["Update", ["identifier:train", 1, 23, 1, 28], "trained"]]
medicode/tensor2tensor@b7b448da238e2dcc6755b8d7caecf8f6bf93b9b8
Fixed typo. PiperOrigin-RevId: 205127272
[ { "sha": "839501a95d7b63a8e5bcf572d0a77724241ec17d", "filename": "tensor2tensor/models/distillation.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/b7b448da238e2dcc6755b8d7caecf8f6bf93b9b8/tensor2tensor%2Fmodels%2Fdistillation.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/b7b448da238e2dcc6755b8d7caecf8f6bf93b9b8/tensor2tensor%2Fmodels%2Fdistillation.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fmodels%2Fdistillation.py?ref=b7b448da238e2dcc6755b8d7caecf8f6bf93b9b8", "patch": "@@ -28,7 +28,7 @@\n class Distillation(t2t_model.T2TModel):\n \"\"\"Distillation from a teacher to student network.\n \n- First, a teacher is train on a task; Second, a student is trained to perform\n+ First, a teacher is trained on a task; Second, a student is trained to perform\n the task while matching the teacher's softened outputs. For more details, see\n the paper below.\n " } ]
tensor2tensor
c29de87c6951adb1ef7b5986e70f898e0d335a19
70b46c3ae8196d050549e87c7f6aa90574e5afb8
tensor2tensor/data_generators/multi_problem.py
https://github.com/medicode/tensor2tensor
true
false
false
@@ -109,6 +109,6 @@ class MultiProblem(problem.Problem): else: single_mtl_dataset = datasets[0] for data in datasets[1:]: - single_mtl_dataset.concatenate(data) + single_mtl_dataset = single_mtl_dataset.concatenate(data) return single_mtl_dataset
single_mtl_dataset . concatenate ( data )
single_mtl_dataset = single_mtl_dataset . concatenate ( data )
SINGLE_STMT
[["Insert", ["expression_statement", 3, 9, 3, 45], ["assignment", "N0"], 0], ["Insert", "N0", ["identifier:single_mtl_dataset", "T"], 0], ["Insert", "N0", ["=:=", "T"], 1], ["Move", "N0", ["call", 3, 9, 3, 45], 2]]
medicode/tensor2tensor@c29de87c6951adb1ef7b5986e70f898e0d335a19
Fixing data concatenation in MultiProblem eval. PiperOrigin-RevId: 205769079
[ { "sha": "5a061155337458f868ccc98fcb5b3bd76443c4af", "filename": "tensor2tensor/data_generators/multi_problem.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/c29de87c6951adb1ef7b5986e70f898e0d335a19/tensor2tensor%2Fdata_generators%2Fmulti_problem.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/c29de87c6951adb1ef7b5986e70f898e0d335a19/tensor2tensor%2Fdata_generators%2Fmulti_problem.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fdata_generators%2Fmulti_problem.py?ref=c29de87c6951adb1ef7b5986e70f898e0d335a19", "patch": "@@ -109,6 +109,6 @@ def dataset(self,\n else:\n single_mtl_dataset = datasets[0]\n for data in datasets[1:]:\n- single_mtl_dataset.concatenate(data)\n+ single_mtl_dataset = single_mtl_dataset.concatenate(data)\n \n return single_mtl_dataset" } ]
tensor2tensor
55ad83230d946a8027b8bee57a537de78896a3e4
d5b748caac8fcba6adffaa2a9c95c807ed6dbaea
tensor2tensor/data_generators/cifar.py
https://github.com/medicode/tensor2tensor
true
false
true
@@ -92,7 +92,7 @@ def cifar_generator(cifar_version, tmp_dir, training, how_many, start_from=0): for filename in data_files: path = os.path.join(tmp_dir, prefix, filename) with tf.gfile.Open(path, "rb") as f: - data = cPickle.load(f) + data = cPickle.load(f, encoding='latin1') images = data["data"] num_images = images.shape[0] images = images.reshape((num_images, 3, image_size, image_size))
data = cPickle . load ( f )
data = cPickle . load ( f , encoding = 'latin1' )
SAME_FUNCTION_MORE_ARGS
[["Insert", ["argument_list", 3, 26, 3, 29], [",:,", "T"], 2], ["Insert", ["argument_list", 3, 26, 3, 29], ["keyword_argument", "N0"], 3], ["Insert", "N0", ["identifier:encoding", "T"], 0], ["Insert", "N0", ["=:=", "T"], 1], ["Insert", "N0", ["string:'latin1'", "T"], 2]]
medicode/tensor2tensor@55ad83230d946a8027b8bee57a537de78896a3e4
Update cifar.py (#935) With `Python 3.6.5` the following exception will be thrown: ``` File "/home/lhlmgr/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/cifar.py", line 146, in cifar_generator data = cPickle.load(f) UnicodeDecodeError: 'ascii' codec can't decode byte 0xff in position 0: ordinal not in range(128) ``` Adding the the parameter `encoding='latin1'` fixes this error for `cifar10` and `cifar100`
[ { "sha": "efd36bb84f699104e227bc14be94ac4bd48cd64a", "filename": "tensor2tensor/data_generators/cifar.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/55ad83230d946a8027b8bee57a537de78896a3e4/tensor2tensor%2Fdata_generators%2Fcifar.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/55ad83230d946a8027b8bee57a537de78896a3e4/tensor2tensor%2Fdata_generators%2Fcifar.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fdata_generators%2Fcifar.py?ref=55ad83230d946a8027b8bee57a537de78896a3e4", "patch": "@@ -92,7 +92,7 @@ def cifar_generator(cifar_version, tmp_dir, training, how_many, start_from=0):\n for filename in data_files:\n path = os.path.join(tmp_dir, prefix, filename)\n with tf.gfile.Open(path, \"rb\") as f:\n- data = cPickle.load(f)\n+ data = cPickle.load(f, encoding='latin1')\n images = data[\"data\"]\n num_images = images.shape[0]\n images = images.reshape((num_images, 3, image_size, image_size))" } ]
tensor2tensor
9e6d737f066596ebaf1ba981bc5fa384e9b1bbc7
814c4a13e1b03e3db444b31be2e6337c02c11bc7
tensor2tensor/data_generators/video_utils.py
https://github.com/medicode/tensor2tensor
true
false
true
@@ -64,7 +64,7 @@ def summarize_video_metrics(hook_args): summary_values = [] for name, array in six.iteritems(metrics_results): for ind, val in enumerate(array): - tag = name + "_" + str(ind) + tag = "metric_{}/{}".format(name, ind) summary_values.append(tf.Summary.Value(tag=tag, simple_value=val)) return summary_values
tag = name + "_" + str ( ind )
tag = "metric_{}/{}" . format ( name , ind )
SINGLE_STMT
[["Move", ["assignment", 3, 7, 3, 34], ["call", 3, 26, 3, 34], 2], ["Insert", ["call", 3, 26, 3, 34], ["attribute", "N0"], 0], ["Insert", "N0", ["string:\"metric_{}/{}\"", "T"], 0], ["Insert", "N0", [".:.", "T"], 1], ["Insert", "N0", ["identifier:format", "T"], 2], ["Move", ["argument_list", 3, 29, 3, 34], ["identifier:name", 3, 13, 3, 17], 1], ["Insert", ["argument_list", 3, 29, 3, 34], [",:,", "T"], 2], ["Delete", ["+:+", 3, 18, 3, 19]], ["Delete", ["string:\"_\"", 3, 20, 3, 23]], ["Delete", ["binary_operator", 3, 13, 3, 23]], ["Delete", ["+:+", 3, 24, 3, 25]], ["Delete", ["identifier:str", 3, 26, 3, 29]], ["Delete", ["binary_operator", 3, 13, 3, 34]]]
medicode/tensor2tensor@9e6d737f066596ebaf1ba981bc5fa384e9b1bbc7
fixing the hierarchy of video metrics in TB. PiperOrigin-RevId: 206528847
[ { "sha": "11471d0897cecbf97f7823ed31b10380f65aad02", "filename": "tensor2tensor/data_generators/video_utils.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/9e6d737f066596ebaf1ba981bc5fa384e9b1bbc7/tensor2tensor%2Fdata_generators%2Fvideo_utils.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/9e6d737f066596ebaf1ba981bc5fa384e9b1bbc7/tensor2tensor%2Fdata_generators%2Fvideo_utils.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fdata_generators%2Fvideo_utils.py?ref=9e6d737f066596ebaf1ba981bc5fa384e9b1bbc7", "patch": "@@ -64,7 +64,7 @@ def summarize_video_metrics(hook_args):\n summary_values = []\n for name, array in six.iteritems(metrics_results):\n for ind, val in enumerate(array):\n- tag = name + \"_\" + str(ind)\n+ tag = \"metric_{}/{}\".format(name, ind)\n summary_values.append(tf.Summary.Value(tag=tag, simple_value=val))\n return summary_values\n " } ]
tensor2tensor
6a595f1b3dc1d47c758a2c1674a5bda6b953ad66
0c1b7ea2ac9858417ff3332aba3ae6fe0dc6394a
tensor2tensor/models/research/transformer_vae.py
https://github.com/medicode/tensor2tensor
true
false
true
@@ -687,7 +687,7 @@ def transformer_ae_small(): hparams.add_hparam("noise_dev", 0.5) hparams.add_hparam("d_mix", 0.5) hparams.add_hparam("logit_normalization", True) - hparams.add_hparam("word_dropout", 0.1) + hparams.add_hparam("word_dropout", 0.0) # Bottleneck kinds supported: dense, vae, semhash, gumbel-softmax, dvq. hparams.add_hparam("bottleneck_kind", "semhash") hparams.add_hparam("num_blocks", 1)
hparams . add_hparam ( "word_dropout" , 0.1 )
hparams . add_hparam ( "word_dropout" , 0.0 )
CHANGE_NUMERIC_LITERAL
[["Update", ["float:0.1", 3, 38, 3, 41], "0.0"]]
medicode/tensor2tensor@6a595f1b3dc1d47c758a2c1674a5bda6b953ad66
Change word dropout default to 0. PiperOrigin-RevId: 206776309
[ { "sha": "768ed4f16eb4cf74984455b31a53565bb034b702", "filename": "tensor2tensor/models/research/transformer_vae.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/6a595f1b3dc1d47c758a2c1674a5bda6b953ad66/tensor2tensor%2Fmodels%2Fresearch%2Ftransformer_vae.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/6a595f1b3dc1d47c758a2c1674a5bda6b953ad66/tensor2tensor%2Fmodels%2Fresearch%2Ftransformer_vae.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fmodels%2Fresearch%2Ftransformer_vae.py?ref=6a595f1b3dc1d47c758a2c1674a5bda6b953ad66", "patch": "@@ -687,7 +687,7 @@ def transformer_ae_small():\n hparams.add_hparam(\"noise_dev\", 0.5)\n hparams.add_hparam(\"d_mix\", 0.5)\n hparams.add_hparam(\"logit_normalization\", True)\n- hparams.add_hparam(\"word_dropout\", 0.1)\n+ hparams.add_hparam(\"word_dropout\", 0.0)\n # Bottleneck kinds supported: dense, vae, semhash, gumbel-softmax, dvq.\n hparams.add_hparam(\"bottleneck_kind\", \"semhash\")\n hparams.add_hparam(\"num_blocks\", 1)" } ]
tensor2tensor
1e628646e44220c8a9f433d661fa8ad9605effdd
e88fa1a0273181b97c6a899eb3bdd57a105c1721
tensor2tensor/bin/t2t_decoder.py
https://github.com/medicode/tensor2tensor
true
false
true
@@ -129,7 +129,7 @@ def decode(estimator, hparams, decode_hp): problem = registry.problem(FLAGS.problems) problem.output_predictions( predictions=predictions, - max_num_examples=FLAGS.num_examples) + num_examples=FLAGS.num_examples) def score_file(filename):
problem . output_predictions ( predictions = predictions , max_num_examples = FLAGS . num_examples )
problem . output_predictions ( predictions = predictions , num_examples = FLAGS . num_examples )
CHANGE_KEYWORD_ARGUMENT_USED
[["Update", ["identifier:max_num_examples", 3, 11, 3, 27], "num_examples"]]
medicode/tensor2tensor@1e628646e44220c8a9f433d661fa8ad9605effdd
fix
[ { "sha": "a4b1e3a19b35491ed3050cab9d6a06eab57e22dc", "filename": "tensor2tensor/bin/t2t_decoder.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/1e628646e44220c8a9f433d661fa8ad9605effdd/tensor2tensor%2Fbin%2Ft2t_decoder.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/1e628646e44220c8a9f433d661fa8ad9605effdd/tensor2tensor%2Fbin%2Ft2t_decoder.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fbin%2Ft2t_decoder.py?ref=1e628646e44220c8a9f433d661fa8ad9605effdd", "patch": "@@ -129,7 +129,7 @@ def decode(estimator, hparams, decode_hp):\n problem = registry.problem(FLAGS.problems)\n problem.output_predictions(\n predictions=predictions,\n- max_num_examples=FLAGS.num_examples)\n+ num_examples=FLAGS.num_examples)\n \n \n def score_file(filename):" } ]
tensor2tensor
9786320b8a5d5122c43e191553f70b3b45d73efc
1a754af96bfdfd6015c7763374c0d5910a93dbf2
tensor2tensor/models/research/next_frame_savp.py
https://github.com/medicode/tensor2tensor
true
false
true
@@ -175,7 +175,7 @@ class NextFrameSAVP(next_frame_sv2p.NextFrameStochastic): done_warm_start = step > context_frames - 1 groundtruth_items = [image] generated_items = [pred_image] - input_image = self.get_scheduled_sample_inputs( + input_image, = self.get_scheduled_sample_inputs( done_warm_start, groundtruth_items, generated_items, ss_func) all_latents = tf.concat([enc_cond_latent, enc_prior_latent], axis=0)
input_image = self . get_scheduled_sample_inputs ( done_warm_start , groundtruth_items , generated_items , ss_func )
input_image , = self . get_scheduled_sample_inputs ( done_warm_start , groundtruth_items , generated_items , ss_func )
SINGLE_STMT
[["Insert", ["assignment", 3, 9, 4, 74], ["pattern_list", "N0"], 0], ["Move", "N0", ["identifier:input_image", 3, 9, 3, 20], 0], ["Insert", "N0", [",:,", "T"], 1]]
medicode/tensor2tensor@9786320b8a5d5122c43e191553f70b3b45d73efc
Fix SAVP test. PiperOrigin-RevId: 207041279
[ { "sha": "1e4883291f3bc26e1ea9a997b1bbbeb8ac31d10a", "filename": "tensor2tensor/models/research/next_frame_savp.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/9786320b8a5d5122c43e191553f70b3b45d73efc/tensor2tensor%2Fmodels%2Fresearch%2Fnext_frame_savp.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/9786320b8a5d5122c43e191553f70b3b45d73efc/tensor2tensor%2Fmodels%2Fresearch%2Fnext_frame_savp.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fmodels%2Fresearch%2Fnext_frame_savp.py?ref=9786320b8a5d5122c43e191553f70b3b45d73efc", "patch": "@@ -175,7 +175,7 @@ def construct_model(self, images, actions, rewards):\n done_warm_start = step > context_frames - 1\n groundtruth_items = [image]\n generated_items = [pred_image]\n- input_image = self.get_scheduled_sample_inputs(\n+ input_image, = self.get_scheduled_sample_inputs(\n done_warm_start, groundtruth_items, generated_items, ss_func)\n \n all_latents = tf.concat([enc_cond_latent, enc_prior_latent], axis=0)" } ]
tensor2tensor
ea53aee4b4fa0c28421dbef64f544be08682f4af
51e611760ee3d87f3adbd8d5efe0a81b9957b2f8
tensor2tensor/utils/t2t_model.py
https://github.com/medicode/tensor2tensor
true
false
true
@@ -1430,7 +1430,7 @@ class T2TModel(base.Layer): # Fathom # allow model to emit additional outputs hardcoding in feature # keys t2t uses - SKIP_FEATURES = ['inputs', 'targets', 'infer_targets', 'outputs', 'scores', 'problem_choice'] + SKIP_FEATURES = ['inputs', 'targets', 'infer_targets', 'outputs', 'scores', 'problem_choice', 'example_id'] for k in infer_out: if k in SKIP_FEATURES: continue assert k not in predictions, (f'{k} should not be in {predictions.keys()}')
SKIP_FEATURES = [ 'inputs' , 'targets' , 'infer_targets' , 'outputs' , 'scores' , 'problem_choice' ]
SKIP_FEATURES = [ 'inputs' , 'targets' , 'infer_targets' , 'outputs' , 'scores' , 'problem_choice' , 'example_id' ]
ADD_ELEMENTS_TO_ITERABLE
[["Insert", ["list", 3, 21, 3, 98], [",:,", "T"], 12], ["Insert", ["list", 3, 21, 3, 98], ["string:'example_id'", "T"], 13]]
medicode/tensor2tensor@ea53aee4b4fa0c28421dbef64f544be08682f4af
fix
[ { "sha": "a1eb9ed2afb24994eec4ddeb803ba70a55e2355a", "filename": "tensor2tensor/utils/t2t_model.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/ea53aee4b4fa0c28421dbef64f544be08682f4af/tensor2tensor%2Futils%2Ft2t_model.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/ea53aee4b4fa0c28421dbef64f544be08682f4af/tensor2tensor%2Futils%2Ft2t_model.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Futils%2Ft2t_model.py?ref=ea53aee4b4fa0c28421dbef64f544be08682f4af", "patch": "@@ -1430,7 +1430,7 @@ def estimator_spec_predict(self, features, use_tpu=False):\n # Fathom\n # allow model to emit additional outputs hardcoding in feature\n # keys t2t uses\n- SKIP_FEATURES = ['inputs', 'targets', 'infer_targets', 'outputs', 'scores', 'problem_choice']\n+ SKIP_FEATURES = ['inputs', 'targets', 'infer_targets', 'outputs', 'scores', 'problem_choice', 'example_id']\n for k in infer_out:\n if k in SKIP_FEATURES: continue\n assert k not in predictions, (f'{k} should not be in {predictions.keys()}')" } ]
tensor2tensor
6f4f9133e9fee011955810291bee8125a14ba055
5f1fdbf9844254a9d1ce4d35d1af25038c00a79c
tensor2tensor/layers/common_attention.py
https://github.com/medicode/tensor2tensor
true
false
false
@@ -3114,7 +3114,7 @@ def compute_attention_component(antecedent, antecedent, total_depth, use_bias=False, name=name) else: return common_layers.conv1d( - antecedent, total_depth, filter_width, padding, name=name) + antecedent, total_depth, filter_width, padding=padding, name=name) def compute_qkv(query_antecedent,
return common_layers . conv1d ( antecedent , total_depth , filter_width , padding , name = name )
return common_layers . conv1d ( antecedent , total_depth , filter_width , padding = padding , name = name )
SINGLE_STMT
[["Insert", ["argument_list", 2, 32, 3, 67], ["keyword_argument", "N0"], 7], ["Move", "N0", ["identifier:padding", 3, 48, 3, 55], 0], ["Insert", "N0", ["=:=", "T"], 1], ["Insert", "N0", ["identifier:padding", "T"], 2]]
medicode/tensor2tensor@6f4f9133e9fee011955810291bee8125a14ba055
Fix argument passing to common_layers.conv1d PiperOrigin-RevId: 207728760
[ { "sha": "6393de99bd9383645d5b3462b4f1b18a8d6d596a", "filename": "tensor2tensor/layers/common_attention.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/6f4f9133e9fee011955810291bee8125a14ba055/tensor2tensor%2Flayers%2Fcommon_attention.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/6f4f9133e9fee011955810291bee8125a14ba055/tensor2tensor%2Flayers%2Fcommon_attention.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Flayers%2Fcommon_attention.py?ref=6f4f9133e9fee011955810291bee8125a14ba055", "patch": "@@ -3114,7 +3114,7 @@ def compute_attention_component(antecedent,\n antecedent, total_depth, use_bias=False, name=name)\n else:\n return common_layers.conv1d(\n- antecedent, total_depth, filter_width, padding, name=name)\n+ antecedent, total_depth, filter_width, padding=padding, name=name)\n \n \n def compute_qkv(query_antecedent," } ]
tensor2tensor
2fd91d34b8e6d79599c0612e446175174e838b9d
2674c371dcc2b42bebfd60bdda5eeadf260fc236
tensor2tensor/utils/get_rouge.py
https://github.com/medicode/tensor2tensor
true
false
true
@@ -46,7 +46,7 @@ def prep_data(decode_dir, target_dir): write_to_file(os.path.join(decode_dir, "rouge.%06d.txt" % (i+1)), d) write_to_file(os.path.join(target_dir, "rouge.A.%06d.txt" % (i+1)), t) if (i+1 % 1000) == 0: - tf.logging.into("Written %d examples to file" % i) + tf.logging.info("Written %d examples to file" % i) def main(_):
tf . logging . into ( "Written %d examples to file" % i )
tf . logging . info ( "Written %d examples to file" % i )
WRONG_FUNCTION_NAME
[["Update", ["identifier:into", 3, 22, 3, 26], "info"]]
medicode/tensor2tensor@2fd91d34b8e6d79599c0612e446175174e838b9d
typo fixed from tf.logging.into to tf.logging.info
[ { "sha": "65dc883a60c443042e387a7880f5ea8726f8c49c", "filename": "tensor2tensor/utils/get_rouge.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/2fd91d34b8e6d79599c0612e446175174e838b9d/tensor2tensor%2Futils%2Fget_rouge.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/2fd91d34b8e6d79599c0612e446175174e838b9d/tensor2tensor%2Futils%2Fget_rouge.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Futils%2Fget_rouge.py?ref=2fd91d34b8e6d79599c0612e446175174e838b9d", "patch": "@@ -46,7 +46,7 @@ def prep_data(decode_dir, target_dir):\n write_to_file(os.path.join(decode_dir, \"rouge.%06d.txt\" % (i+1)), d)\n write_to_file(os.path.join(target_dir, \"rouge.A.%06d.txt\" % (i+1)), t)\n if (i+1 % 1000) == 0:\n- tf.logging.into(\"Written %d examples to file\" % i)\n+ tf.logging.info(\"Written %d examples to file\" % i)\n \n \n def main(_):" } ]
tensor2tensor
e6caaf2afe0e2dda58aa43483ea2d214dcdc4be1
30fe32c0a4f0a5333dd4e921f538dac93c03e099
tensor2tensor/models/research/glow_ops_test.py
https://github.com/medicode/tensor2tensor
true
false
false
@@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""Tests for google3.third_party.py.tensor2tensor.models.research.glow_ops.""" +"""Tests for tensor2tensor.models.research.glow_ops.""" from __future__ import absolute_import from __future__ import division
"""Tests for google3.third_party.py.tensor2tensor.models.research.glow_ops."""
"""Tests for tensor2tensor.models.research.glow_ops."""
CHANGE_STRING_LITERAL
[["Update", ["string:\"\"\"Tests for google3.third_party.py.tensor2tensor.models.research.glow_ops.\"\"\"", 3, 1, 3, 79], "\"\"\"Tests for tensor2tensor.models.research.glow_ops.\"\"\""]]
medicode/tensor2tensor@e6caaf2afe0e2dda58aa43483ea2d214dcdc4be1
Fix description PiperOrigin-RevId: 208295334
[ { "sha": "0e42f71ea4c179983d96209692e4535c47abb418", "filename": "tensor2tensor/models/research/glow_ops_test.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/e6caaf2afe0e2dda58aa43483ea2d214dcdc4be1/tensor2tensor%2Fmodels%2Fresearch%2Fglow_ops_test.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/e6caaf2afe0e2dda58aa43483ea2d214dcdc4be1/tensor2tensor%2Fmodels%2Fresearch%2Fglow_ops_test.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fmodels%2Fresearch%2Fglow_ops_test.py?ref=e6caaf2afe0e2dda58aa43483ea2d214dcdc4be1", "patch": "@@ -12,7 +12,7 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n-\"\"\"Tests for google3.third_party.py.tensor2tensor.models.research.glow_ops.\"\"\"\n+\"\"\"Tests for tensor2tensor.models.research.glow_ops.\"\"\"\n \n from __future__ import absolute_import\n from __future__ import division" } ]
tensor2tensor
0e2b974511848de4cdd029057057313e3266df48
419e13c3e04af91d2ba158ec311ed23603185f37
tensor2tensor/models/research/next_frame_params.py
https://github.com/medicode/tensor2tensor
true
false
true
@@ -73,7 +73,7 @@ def next_frame_stochastic(): hparams.video_modality_loss_cutoff = 0.0 hparams.add_hparam("stochastic_model", True) hparams.add_hparam("reward_prediction", True) - hparams.add_hparam("reward_prediction_stop_gradient", False) + hparams.add_hparam("reward_prediction_stop_gradient", True) hparams.add_hparam("model_options", "CDNA") hparams.add_hparam("num_masks", 10) hparams.add_hparam("latent_channels", 1)
hparams . add_hparam ( "reward_prediction_stop_gradient" , False )
hparams . add_hparam ( "reward_prediction_stop_gradient" , True )
CHANGE_BOOLEAN_LITERAL
[["Insert", ["argument_list", 3, 21, 3, 63], ["true:True", "T"], 3], ["Delete", ["false:False", 3, 57, 3, 62]]]
medicode/tensor2tensor@0e2b974511848de4cdd029057057313e3266df48
change the default for reward_prediction_stop_gradient to True. PiperOrigin-RevId: 208426661
[ { "sha": "dd0559818d32b17e0b2f130b55652452d0f32336", "filename": "tensor2tensor/models/research/next_frame_params.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/0e2b974511848de4cdd029057057313e3266df48/tensor2tensor%2Fmodels%2Fresearch%2Fnext_frame_params.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/0e2b974511848de4cdd029057057313e3266df48/tensor2tensor%2Fmodels%2Fresearch%2Fnext_frame_params.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fmodels%2Fresearch%2Fnext_frame_params.py?ref=0e2b974511848de4cdd029057057313e3266df48", "patch": "@@ -73,7 +73,7 @@ def next_frame_stochastic():\n hparams.video_modality_loss_cutoff = 0.0\n hparams.add_hparam(\"stochastic_model\", True)\n hparams.add_hparam(\"reward_prediction\", True)\n- hparams.add_hparam(\"reward_prediction_stop_gradient\", False)\n+ hparams.add_hparam(\"reward_prediction_stop_gradient\", True)\n hparams.add_hparam(\"model_options\", \"CDNA\")\n hparams.add_hparam(\"num_masks\", 10)\n hparams.add_hparam(\"latent_channels\", 1)" } ]
tensor2tensor
4daee8581c8a089062ef60742d9db6e18d90316c
408c48c602bb36100ab50cd41735c25cbf2c9ee7
tensor2tensor/data_generators/problem.py
https://github.com/medicode/tensor2tensor
true
false
false
@@ -866,7 +866,7 @@ class Problem(object): # on TPU, we use params["batch_size"], which specifies the number of # examples across all datashards batch_size = params["batch_size"] - dataset = dataset.batch(batch_size, drop_remainder=True) + dataset = dataset.batch(batch_size) else: num_shards = config.data_parallelism.n if config else 1 batch_size = hparams.batch_size * num_shards
dataset = dataset . batch ( batch_size , drop_remainder = True )
dataset = dataset . batch ( batch_size )
SAME_FUNCTION_LESS_ARGS
[["Delete", [",:,", 3, 43, 3, 44]], ["Delete", ["identifier:drop_remainder", 3, 45, 3, 59]], ["Delete", ["=:=", 3, 59, 3, 60]], ["Delete", ["true:True", 3, 60, 3, 64]], ["Delete", ["keyword_argument", 3, 45, 3, 64]]]
medicode/tensor2tensor@4daee8581c8a089062ef60742d9db6e18d90316c
fix unexpected keyword 'drop_remainder' on TPU
[ { "sha": "e1884ff98af7411d4586daa654f79b4dd5a73ab1", "filename": "tensor2tensor/data_generators/problem.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/4daee8581c8a089062ef60742d9db6e18d90316c/tensor2tensor%2Fdata_generators%2Fproblem.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/4daee8581c8a089062ef60742d9db6e18d90316c/tensor2tensor%2Fdata_generators%2Fproblem.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fdata_generators%2Fproblem.py?ref=4daee8581c8a089062ef60742d9db6e18d90316c", "patch": "@@ -866,7 +866,7 @@ def define_shapes(example):\n # on TPU, we use params[\"batch_size\"], which specifies the number of\n # examples across all datashards\n batch_size = params[\"batch_size\"]\n- dataset = dataset.batch(batch_size, drop_remainder=True)\n+ dataset = dataset.batch(batch_size)\n else:\n num_shards = config.data_parallelism.n if config else 1\n batch_size = hparams.batch_size * num_shards" } ]
tensor2tensor
d6d4dae5861e695b02a1aa14c187447f4c4a3624
f5938ccc10c4cf37965c42a3c8c5b5349310cb3d
tensor2tensor/models/research/next_frame_sv2p.py
https://github.com/medicode/tensor2tensor
true
false
true
@@ -314,7 +314,7 @@ class NextFrameStochastic(next_frame.NextFrameBasic): # No latent in the first phase iter_num = self.get_iteration_num() ret_mean, ret_std = tf.cond( - iter_num < self.hparams.num_iterations_1st_stage, + tf.less(iter_num, self.hparams.num_iterations_1st_stage), lambda: (tf.zeros_like(mean), tf.zeros_like(std)), lambda: (mean, std))
ret_mean , ret_std = tf . cond ( iter_num < self . hparams . num_iterations_1st_stage , lambda : ( tf . zeros_like ( mean ) , tf . zeros_like ( std ) ) , lambda : ( mean , std ) )
ret_mean , ret_std = tf . cond ( tf . less ( iter_num , self . hparams . num_iterations_1st_stage ) , lambda : ( tf . zeros_like ( mean ) , tf . zeros_like ( std ) ) , lambda : ( mean , std ) )
SINGLE_STMT
[["Insert", ["argument_list", 2, 34, 5, 31], ["call", "N0"], 1], ["Insert", "N0", ["attribute", "N1"], 0], ["Insert", "N0", ["argument_list", "N2"], 1], ["Insert", "N1", ["identifier:tf", "T"], 0], ["Insert", "N1", [".:.", "T"], 1], ["Insert", "N1", ["identifier:less", "T"], 2], ["Insert", "N2", ["(:(", "T"], 0], ["Move", "N2", ["identifier:iter_num", 3, 11, 3, 19], 1], ["Insert", "N2", [",:,", "T"], 2], ["Move", "N2", ["attribute", 3, 22, 3, 59], 3], ["Insert", "N2", ["):)", "T"], 4], ["Delete", ["<:<", 3, 20, 3, 21]], ["Delete", ["comparison_operator", 3, 11, 3, 59]]]
medicode/tensor2tensor@d6d4dae5861e695b02a1aa14c187447f4c4a3624
fixing OSS version. PiperOrigin-RevId: 208538283
[ { "sha": "f5da10330de9728f31e56f155b4ffb9b3724fec5", "filename": "tensor2tensor/models/research/next_frame_sv2p.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/d6d4dae5861e695b02a1aa14c187447f4c4a3624/tensor2tensor%2Fmodels%2Fresearch%2Fnext_frame_sv2p.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/d6d4dae5861e695b02a1aa14c187447f4c4a3624/tensor2tensor%2Fmodels%2Fresearch%2Fnext_frame_sv2p.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fmodels%2Fresearch%2Fnext_frame_sv2p.py?ref=d6d4dae5861e695b02a1aa14c187447f4c4a3624", "patch": "@@ -314,7 +314,7 @@ def construct_latent_tower(self, images):\n # No latent in the first phase\n iter_num = self.get_iteration_num()\n ret_mean, ret_std = tf.cond(\n- iter_num < self.hparams.num_iterations_1st_stage,\n+ tf.less(iter_num, self.hparams.num_iterations_1st_stage),\n lambda: (tf.zeros_like(mean), tf.zeros_like(std)),\n lambda: (mean, std))\n " } ]
tensor2tensor
967a578beef2064a9824febb80cc54f248329e6d
3d194cb14c4ae563895fbd910e6ed82809baee3f
tensor2tensor/data_generators/vqa.py
https://github.com/medicode/tensor2tensor
true
false
false
@@ -32,7 +32,7 @@ from tensor2tensor.data_generators import generator_utils from tensor2tensor.data_generators import image_utils from tensor2tensor.data_generators import problem from tensor2tensor.data_generators import text_encoder -from tensor2tensor.google.data_generators import vqa_utils +from tensor2tensor.data_generators import vqa_utils from tensor2tensor.utils import metrics from tensor2tensor.utils import registry
from tensor2tensor . google . data_generators import vqa_utils
from tensor2tensor . data_generators import vqa_utils
SINGLE_STMT
[["Delete", ["identifier:google", 3, 20, 3, 26]], ["Delete", [".:.", 3, 26, 3, 27]]]
medicode/tensor2tensor@967a578beef2064a9824febb80cc54f248329e6d
fix vqa_utils import PiperOrigin-RevId: 208559948
[ { "sha": "27f025c1d2666fc2c591d8cd638c3cf578eccd91", "filename": "tensor2tensor/data_generators/vqa.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/967a578beef2064a9824febb80cc54f248329e6d/tensor2tensor%2Fdata_generators%2Fvqa.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/967a578beef2064a9824febb80cc54f248329e6d/tensor2tensor%2Fdata_generators%2Fvqa.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fdata_generators%2Fvqa.py?ref=967a578beef2064a9824febb80cc54f248329e6d", "patch": "@@ -32,7 +32,7 @@\n from tensor2tensor.data_generators import image_utils\n from tensor2tensor.data_generators import problem\n from tensor2tensor.data_generators import text_encoder\n-from tensor2tensor.google.data_generators import vqa_utils\n+from tensor2tensor.data_generators import vqa_utils\n from tensor2tensor.utils import metrics\n from tensor2tensor.utils import registry\n " } ]
tensor2tensor
4bbd59569fd5efef4604605bb839f11fd09987ef
9e3fadb9e294302b6c3215e8275eb51d60b2e61f
tensor2tensor/models/research/universal_transformer_util.py
https://github.com/medicode/tensor2tensor
true
false
false
@@ -578,7 +578,7 @@ def universal_transformer_basic(layer_inputs, - state, inputs, memory = layer_inputs + state, inputs, memory = tf.unstack(layer_inputs,num=None,axis=0,name="unstack") state = step_preprocess(state, step, hparams) new_state = ffn_unit(attention_unit(state))
state , inputs , memory = layer_inputs
state , inputs , memory = tf . unstack ( layer_inputs , num = None , axis = 0 , name = "unstack" )
ADD_FUNCTION_AROUND_EXPRESSION
[["Insert", ["assignment", 0, 3, 0, 39], ["call", "N0"], 2], ["Insert", "N0", ["attribute", "N1"], 0], ["Insert", "N0", ["argument_list", "N2"], 1], ["Insert", "N1", ["identifier:tf", "T"], 0], ["Insert", "N1", [".:.", "T"], 1], ["Insert", "N1", ["identifier:unstack", "T"], 2], ["Insert", "N2", ["(:(", "T"], 0], ["Move", "N2", ["identifier:layer_inputs", 0, 27, 0, 39], 1], ["Insert", "N2", [",:,", "T"], 2], ["Insert", "N2", ["keyword_argument", "N3"], 3], ["Insert", "N2", [",:,", "T"], 4], ["Insert", "N2", ["keyword_argument", "N4"], 5], ["Insert", "N2", [",:,", "T"], 6], ["Insert", "N2", ["keyword_argument", "N5"], 7], ["Insert", "N2", ["):)", "T"], 8], ["Insert", "N3", ["identifier:num", "T"], 0], ["Insert", "N3", ["=:=", "T"], 1], ["Insert", "N3", ["none:None", "T"], 2], ["Insert", "N4", ["identifier:axis", "T"], 0], ["Insert", "N4", ["=:=", "T"], 1], ["Insert", "N4", ["integer:0", "T"], 2], ["Insert", "N5", ["identifier:name", "T"], 0], ["Insert", "N5", ["=:=", "T"], 1], ["Insert", "N5", ["string:\"unstack\"", "T"], 2]]
medicode/tensor2tensor@4bbd59569fd5efef4604605bb839f11fd09987ef
Update universal_transformer_util.py to fix TypeError (#987)
[ { "sha": "62c71590caddc0f0d3b05447f52e287b88710b3b", "filename": "tensor2tensor/models/research/universal_transformer_util.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/4bbd59569fd5efef4604605bb839f11fd09987ef/tensor2tensor%2Fmodels%2Fresearch%2Funiversal_transformer_util.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/4bbd59569fd5efef4604605bb839f11fd09987ef/tensor2tensor%2Fmodels%2Fresearch%2Funiversal_transformer_util.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fmodels%2Fresearch%2Funiversal_transformer_util.py?ref=4bbd59569fd5efef4604605bb839f11fd09987ef", "patch": "@@ -578,7 +578,7 @@ def universal_transformer_basic(layer_inputs,\n layer_output:\n new_state: new state\n \"\"\"\n- state, inputs, memory = layer_inputs\n+ state, inputs, memory = tf.unstack(layer_inputs,num=None,axis=0,name=\"unstack\")\n state = step_preprocess(state, step, hparams)\n \n new_state = ffn_unit(attention_unit(state))" } ]
tensor2tensor
86ae6c50bacb576c152642928415cdbe1f467070
fb5accee899436676f67f9c2e94ff9963e2dbc79
tensor2tensor/bin/t2t_datagen.py
https://github.com/medicode/tensor2tensor
true
false
false
@@ -70,7 +70,7 @@ flags.DEFINE_integer("task_id", -1, "For distributed data generation.") flags.DEFINE_integer("task_id_start", -1, "For distributed data generation.") flags.DEFINE_integer("task_id_end", -1, "For distributed data generation.") flags.DEFINE_integer( - "num_concurrent_processes", 10, + "num_concurrent_processes", None, "Applies only to problems for which multiprocess_generate=True.") flags.DEFINE_string("t2t_usr_dir", "", "Path to a Python module that will be imported. The "
flags . DEFINE_integer ( "num_concurrent_processes" , 10 , "Applies only to problems for which multiprocess_generate=True." )
flags . DEFINE_integer ( "num_concurrent_processes" , None , "Applies only to problems for which multiprocess_generate=True." )
SINGLE_TOKEN
[["Insert", ["argument_list", 2, 21, 4, 70], ["none:None", "T"], 3], ["Delete", ["integer:10", 3, 33, 3, 35]]]
medicode/tensor2tensor@86ae6c50bacb576c152642928415cdbe1f467070
Make default num processes for multiprocessing problems None to use number of processes=cpu_count. PiperOrigin-RevId: 193595891
[ { "sha": "d1dcab83486a51d018dfd47d7e16795d076ceb87", "filename": "tensor2tensor/bin/t2t_datagen.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/86ae6c50bacb576c152642928415cdbe1f467070/tensor2tensor%2Fbin%2Ft2t_datagen.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/86ae6c50bacb576c152642928415cdbe1f467070/tensor2tensor%2Fbin%2Ft2t_datagen.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fbin%2Ft2t_datagen.py?ref=86ae6c50bacb576c152642928415cdbe1f467070", "patch": "@@ -70,7 +70,7 @@\n flags.DEFINE_integer(\"task_id_start\", -1, \"For distributed data generation.\")\n flags.DEFINE_integer(\"task_id_end\", -1, \"For distributed data generation.\")\n flags.DEFINE_integer(\n- \"num_concurrent_processes\", 10,\n+ \"num_concurrent_processes\", None,\n \"Applies only to problems for which multiprocess_generate=True.\")\n flags.DEFINE_string(\"t2t_usr_dir\", \"\",\n \"Path to a Python module that will be imported. The \"" } ]
tensor2tensor
d444569dbe568218bb989d2cb4829e3bbac43239
80e1e21b70b6822199ef42f292d753b959a7d919
tensor2tensor/utils/metrics.py
https://github.com/medicode/tensor2tensor
true
false
false
@@ -216,7 +216,7 @@ def padded_accuracy_outputs(predictions, weights = weights_fn(padded_labels) padded_outputs = tf.to_int32(padded_outputs) padded_labels = tf.to_int32(padded_labels) - weights = tf.Print(weights, [weights], summarize=100, message='weights') + padded_outputs = tf.to_int32(padded_outputs) return tf.to_float(tf.equal(padded_outputs, padded_labels)), weights
weights = tf . Print ( weights , [ weights ] , summarize = 100 , message = 'weights' )
padded_outputs = tf . to_int32 ( padded_outputs )
SINGLE_STMT
[["Update", ["identifier:weights", 3, 5, 3, 12], "padded_outputs"], ["Update", ["identifier:Print", 3, 18, 3, 23], "to_int32"], ["Update", ["identifier:weights", 3, 24, 3, 31], "padded_outputs"], ["Delete", [",:,", 3, 31, 3, 32]], ["Delete", ["[:[", 3, 33, 3, 34]], ["Delete", ["identifier:weights", 3, 34, 3, 41]], ["Delete", ["]:]", 3, 41, 3, 42]], ["Delete", ["list", 3, 33, 3, 42]], ["Delete", [",:,", 3, 42, 3, 43]], ["Delete", ["identifier:summarize", 3, 44, 3, 53]], ["Delete", ["=:=", 3, 53, 3, 54]], ["Delete", ["integer:100", 3, 54, 3, 57]], ["Delete", ["keyword_argument", 3, 44, 3, 57]], ["Delete", [",:,", 3, 57, 3, 58]], ["Delete", ["identifier:message", 3, 59, 3, 66]], ["Delete", ["=:=", 3, 66, 3, 67]], ["Delete", ["string:'weights'", 3, 67, 3, 76]], ["Delete", ["keyword_argument", 3, 59, 3, 76]]]
medicode/tensor2tensor@d444569dbe568218bb989d2cb4829e3bbac43239
possible bugfix
[ { "sha": "90143f4eff424ac96c594e371e98deba40ff56b6", "filename": "tensor2tensor/utils/metrics.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/d444569dbe568218bb989d2cb4829e3bbac43239/tensor2tensor%2Futils%2Fmetrics.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/d444569dbe568218bb989d2cb4829e3bbac43239/tensor2tensor%2Futils%2Fmetrics.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Futils%2Fmetrics.py?ref=d444569dbe568218bb989d2cb4829e3bbac43239", "patch": "@@ -216,7 +216,7 @@ def padded_accuracy_outputs(predictions,\n weights = weights_fn(padded_labels)\n padded_outputs = tf.to_int32(padded_outputs)\n padded_labels = tf.to_int32(padded_labels)\n- weights = tf.Print(weights, [weights], summarize=100, message='weights')\n+ padded_outputs = tf.to_int32(padded_outputs)\n return tf.to_float(tf.equal(padded_outputs, padded_labels)), weights\n \n " } ]
tensor2tensor
afc37cfd1eebd57214ba0e8a7741152e3e02d1e2
ee7ee95697f55c7b3c0d61fe21c9ebce4d5427ef
tensor2tensor/utils/metrics.py
https://github.com/medicode/tensor2tensor
true
false
true
@@ -472,7 +472,7 @@ def create_evaluation_metrics(problems, model_hparams): # (epurdy/fathom) see comment in model_builder.py, function # combine_shards for discussion if isinstance(predictions, dict): - if 'outputs' in args or keywords: + if 'outputs' in args or 'outputs' in keywords: kwargs['outputs'] = predictions['outputs'] logits = predictions['logits'] else:
if 'outputs' in args or keywords : kwargs [ 'outputs' ] = predictions [ 'outputs' ]
if 'outputs' in args or 'outputs' in keywords : kwargs [ 'outputs' ] = predictions [ 'outputs' ]
CHANGE_BINARY_OPERAND
[["Insert", ["boolean_operator", 3, 12, 3, 41], ["comparison_operator", "N0"], 2], ["Insert", "N0", ["string:'outputs'", "T"], 0], ["Insert", "N0", ["in:in", "T"], 1], ["Move", "N0", ["identifier:keywords", 3, 33, 3, 41], 2]]
medicode/tensor2tensor@afc37cfd1eebd57214ba0e8a7741152e3e02d1e2
bugfix
[ { "sha": "32315d12a8eb2a3b34b43652011264ac2ec71655", "filename": "tensor2tensor/utils/metrics.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/afc37cfd1eebd57214ba0e8a7741152e3e02d1e2/tensor2tensor%2Futils%2Fmetrics.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/afc37cfd1eebd57214ba0e8a7741152e3e02d1e2/tensor2tensor%2Futils%2Fmetrics.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Futils%2Fmetrics.py?ref=afc37cfd1eebd57214ba0e8a7741152e3e02d1e2", "patch": "@@ -472,7 +472,7 @@ def problem_metric_fn(predictions, features, labels):\n # (epurdy/fathom) see comment in model_builder.py, function\n # combine_shards for discussion\n if isinstance(predictions, dict):\n- if 'outputs' in args or keywords:\n+ if 'outputs' in args or 'outputs' in keywords:\n kwargs['outputs'] = predictions['outputs']\n logits = predictions['logits']\n else:" } ]
tensor2tensor
0ed84153c3e5228414095e1274cdd5623ea93a11
47e2327b656aa9121597144403422d49845c932a
tensor2tensor/data_generators/generator_utils.py
https://github.com/medicode/tensor2tensor
true
false
true
@@ -130,7 +130,7 @@ def outputs_exist(filenames): return out_fname # Fathom -def generate_files(generator, output_filenames, max_cases=None, check_existing_files = False): +def generate_files(generator, output_filenames, max_cases=None, check_existing_files = True):
def generate_files ( generator , output_filenames , max_cases = None , check_existing_files = False ) :
def generate_files ( generator , output_filenames , max_cases = None , check_existing_files = True ) :
CHANGE_BOOLEAN_LITERAL
[["Insert", ["default_parameter", 3, 65, 3, 93], ["true:True", "T"], 2], ["Delete", ["false:False", 3, 88, 3, 93]]]
medicode/tensor2tensor@0ed84153c3e5228414095e1274cdd5623ea93a11
default True to not break other t2t dependencies
[ { "sha": "625a6cee7060a5dbfe30fdb8f25222201dc902f2", "filename": "tensor2tensor/data_generators/generator_utils.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/0ed84153c3e5228414095e1274cdd5623ea93a11/tensor2tensor%2Fdata_generators%2Fgenerator_utils.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/0ed84153c3e5228414095e1274cdd5623ea93a11/tensor2tensor%2Fdata_generators%2Fgenerator_utils.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fdata_generators%2Fgenerator_utils.py?ref=0ed84153c3e5228414095e1274cdd5623ea93a11", "patch": "@@ -130,7 +130,7 @@ def outputs_exist(filenames):\n return out_fname\n \n # Fathom\n-def generate_files(generator, output_filenames, max_cases=None, check_existing_files = False):\n+def generate_files(generator, output_filenames, max_cases=None, check_existing_files = True):\n \"\"\"Generate cases from a generator and save as TFRecord files.\n \n Generated cases are transformed to tf.Example protos and saved as TFRecords" } ]
tensor2tensor
915679059e1164d82db0308eb4aeef391ce40d63
7fb0d71e6d0dcbcf200b2ff5026d13069ce58ffe
tensor2tensor/utils/metrics.py
https://github.com/medicode/tensor2tensor
true
false
false
@@ -418,7 +418,7 @@ def set_auc(predictions, labels = tf.cast(labels, tf.bool) labels = labels[:, 1:] predictions = tf.nn.sigmoid(predictions) - auc, update_op = tf.metrics.auc(labels, predictions, curve='PR') + auc, update_op = tf.metrics.auc(labels, predictions, weights_fn=weights_fn, curve='PR') with tf.control_dependencies([update_op]): auc = tf.identity(auc)
auc , update_op = tf . metrics . auc ( labels , predictions , curve = 'PR' )
auc , update_op = tf . metrics . auc ( labels , predictions , weights_fn = weights_fn , curve = 'PR' )
SAME_FUNCTION_MORE_ARGS
[["Insert", ["argument_list", 3, 36, 3, 69], ["keyword_argument", "N0"], 5], ["Insert", ["argument_list", 3, 36, 3, 69], [",:,", "T"], 6], ["Insert", "N0", ["identifier:weights_fn", "T"], 0], ["Insert", "N0", ["=:=", "T"], 1], ["Insert", "N0", ["identifier:weights_fn", "T"], 2]]
medicode/tensor2tensor@915679059e1164d82db0308eb4aeef391ce40d63
fix the problem
[ { "sha": "45f277c1a2d87b3a7da6ee149054dacbfaf92ba8", "filename": "tensor2tensor/utils/metrics.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/915679059e1164d82db0308eb4aeef391ce40d63/tensor2tensor%2Futils%2Fmetrics.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/915679059e1164d82db0308eb4aeef391ce40d63/tensor2tensor%2Futils%2Fmetrics.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Futils%2Fmetrics.py?ref=915679059e1164d82db0308eb4aeef391ce40d63", "patch": "@@ -418,7 +418,7 @@ def set_auc(predictions,\n labels = tf.cast(labels, tf.bool)\n labels = labels[:, 1:]\n predictions = tf.nn.sigmoid(predictions)\n- auc, update_op = tf.metrics.auc(labels, predictions, curve='PR')\n+ auc, update_op = tf.metrics.auc(labels, predictions, weights_fn=weights_fn, curve='PR')\n \n with tf.control_dependencies([update_op]):\n auc = tf.identity(auc)" } ]
tensor2tensor
9bce6a204350d699a326bdca3adf5ce93618d465
5042e17698c205bd0f4aade8985032c550b679b6
tensor2tensor/utils/metrics.py
https://github.com/medicode/tensor2tensor
true
false
false
@@ -416,7 +416,7 @@ def set_auc(predictions, labels = tf.one_hot(labels, predictions.shape[-1] + 1) labels = tf.reduce_max(labels, axis=1) # gah this is so hacky, now we suppress empty sets... - weights = tf.reduce_max(labels, axis=1, keep_dims=True) + weights = tf.reduce_max(labels[:, 1:], axis=1, keep_dims=True) labels = tf.cast(labels, tf.bool) labels = labels[:, 1:] predictions = tf.nn.sigmoid(predictions)
weights = tf . reduce_max ( labels , axis = 1 , keep_dims = True )
weights = tf . reduce_max ( labels [ : , 1 : ] , axis = 1 , keep_dims = True )
SINGLE_STMT
[["Insert", ["argument_list", 3, 28, 3, 60], ["subscript", "N0"], 1], ["Insert", ["argument_list", 3, 28, 3, 60], [",:,", "T"], 2], ["Move", "N0", ["identifier:labels", 3, 29, 3, 35], 0], ["Insert", "N0", ["[:[", "T"], 1], ["Insert", "N0", ["slice", "N1"], 2], ["Move", "N0", [",:,", 3, 35, 3, 36], 3], ["Insert", "N0", ["slice", "N2"], 4], ["Insert", "N0", ["]:]", "T"], 5], ["Insert", "N1", [":::", "T"], 0], ["Insert", "N2", ["integer:1", "T"], 0], ["Insert", "N2", [":::", "T"], 1]]
medicode/tensor2tensor@9bce6a204350d699a326bdca3adf5ce93618d465
bugfix
[ { "sha": "cdc1127cecf97514b40928484cc8b2e4d86ffa21", "filename": "tensor2tensor/utils/metrics.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/9bce6a204350d699a326bdca3adf5ce93618d465/tensor2tensor%2Futils%2Fmetrics.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/9bce6a204350d699a326bdca3adf5ce93618d465/tensor2tensor%2Futils%2Fmetrics.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Futils%2Fmetrics.py?ref=9bce6a204350d699a326bdca3adf5ce93618d465", "patch": "@@ -416,7 +416,7 @@ def set_auc(predictions,\n labels = tf.one_hot(labels, predictions.shape[-1] + 1)\n labels = tf.reduce_max(labels, axis=1)\n # gah this is so hacky, now we suppress empty sets...\n- weights = tf.reduce_max(labels, axis=1, keep_dims=True)\n+ weights = tf.reduce_max(labels[:, 1:], axis=1, keep_dims=True)\n labels = tf.cast(labels, tf.bool)\n labels = labels[:, 1:]\n predictions = tf.nn.sigmoid(predictions)" } ]
tensor2tensor
d957bdbbbe246d2a25bdb71760da84f9dfa02e55
1c11e51c7d3fd440927a3b5cd0f542ec4c813140
tensor2tensor/data_generators/squad.py
https://github.com/medicode/tensor2tensor
true
false
true
@@ -143,5 +143,5 @@ class SquadConcatPositioned(SquadConcat): for sample in samples: sample['targets'] = self.generate_targets(sample['targets'], sample['context']) - if not sample['targets']: + if sample['targets']: yield sample
if not sample [ 'targets' ] : yield sample
if sample [ 'targets' ] : yield sample
CHANGE_UNARY_OPERATOR
[["Move", ["if_statement", 3, 7, 4, 21], ["subscript", 3, 14, 3, 31], 1], ["Delete", ["not:not", 3, 10, 3, 13]], ["Delete", ["not_operator", 3, 10, 3, 31]]]
medicode/tensor2tensor@d957bdbbbe246d2a25bdb71760da84f9dfa02e55
Fix if condition in squad_concat data generator. PiperOrigin-RevId: 194976019
[ { "sha": "e1930724283ec83b9eca097676cb467209b7deef", "filename": "tensor2tensor/data_generators/squad.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/d957bdbbbe246d2a25bdb71760da84f9dfa02e55/tensor2tensor%2Fdata_generators%2Fsquad.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/d957bdbbbe246d2a25bdb71760da84f9dfa02e55/tensor2tensor%2Fdata_generators%2Fsquad.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fdata_generators%2Fsquad.py?ref=d957bdbbbe246d2a25bdb71760da84f9dfa02e55", "patch": "@@ -143,5 +143,5 @@ def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):\n for sample in samples:\n sample['targets'] = self.generate_targets(sample['targets'],\n sample['context'])\n- if not sample['targets']:\n+ if sample['targets']:\n yield sample" } ]
tensor2tensor
78d5a1372244061a776e65b5eca6f8291632177d
c6825cfdb6bb4008ea2c7ca13e9948f32d2744be
tensor2tensor/data_generators/cifar.py
https://github.com/medicode/tensor2tensor
true
false
true
@@ -93,7 +93,7 @@ def cifar_generator(cifar_version, tmp_dir, training, how_many, start_from=0): all_images, all_labels = [], [] for filename in data_files: path = os.path.join(tmp_dir, prefix, filename) - with tf.gfile.Open(path, "r") as f: + with tf.gfile.Open(path, "rb") as f: data = cPickle.load(f) images = data["data"] num_images = images.shape[0]
with tf . gfile . Open ( path , "r" ) as f : data = cPickle . load ( f )
with tf . gfile . Open ( path , "rb" ) as f : data = cPickle . load ( f )
CHANGE_STRING_LITERAL
[["Update", ["string:\"r\"", 3, 30, 3, 33], "\"rb\""]]
medicode/tensor2tensor@78d5a1372244061a776e65b5eca6f8291632177d
Potential bug fix for different python version PiperOrigin-RevId: 195870607
[ { "sha": "e2e27b787b3553ece41c2a2557ec8aedc3651e25", "filename": "tensor2tensor/data_generators/cifar.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/78d5a1372244061a776e65b5eca6f8291632177d/tensor2tensor%2Fdata_generators%2Fcifar.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/78d5a1372244061a776e65b5eca6f8291632177d/tensor2tensor%2Fdata_generators%2Fcifar.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fdata_generators%2Fcifar.py?ref=78d5a1372244061a776e65b5eca6f8291632177d", "patch": "@@ -93,7 +93,7 @@ def cifar_generator(cifar_version, tmp_dir, training, how_many, start_from=0):\n all_images, all_labels = [], []\n for filename in data_files:\n path = os.path.join(tmp_dir, prefix, filename)\n- with tf.gfile.Open(path, \"r\") as f:\n+ with tf.gfile.Open(path, \"rb\") as f:\n data = cPickle.load(f)\n images = data[\"data\"]\n num_images = images.shape[0]" } ]
tensor2tensor
fcafdd77ae214ec77eaa845a63b6e531e08964c8
5aa564cfeede835a822dd988c05c9871ff17a521
tensor2tensor/models/research/basic_conv_gen.py
https://github.com/medicode/tensor2tensor
true
false
true
@@ -76,7 +76,7 @@ class BasicConvGen(t2t_model.T2TModel): x = common_layers.layer_norm(x) # Add embedded action. - action = tf.reshape(features["input_action"][:, 1, :], + action = tf.reshape(features["input_action"][:, -1, :], [-1, 1, 1, hparams.hidden_size]) action_mask = tf.layers.dense(action, filters, name="action_mask") zeros_mask = tf.zeros(common_layers.shape_list(x)[:-1] + [filters],
action = tf . reshape ( features [ "input_action" ] [ : , 1 , : ] , [ - 1 , 1 , 1 , hparams . hidden_size ] )
action = tf . reshape ( features [ "input_action" ] [ : , - 1 , : ] , [ - 1 , 1 , 1 , hparams . hidden_size ] )
CHANGE_UNARY_OPERATOR
[["Insert", ["subscript", 3, 25, 3, 58], ["unary_operator", "N0"], 4], ["Insert", "N0", ["-:-", "T"], 0], ["Move", "N0", ["integer:1", 3, 53, 3, 54], 1]]
medicode/tensor2tensor@fcafdd77ae214ec77eaa845a63b6e531e08964c8
Fix for the last action to be taken for training.
[ { "sha": "762b5a9b1c24600682c7831a5346feaa044121f1", "filename": "tensor2tensor/models/research/basic_conv_gen.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/fcafdd77ae214ec77eaa845a63b6e531e08964c8/tensor2tensor%2Fmodels%2Fresearch%2Fbasic_conv_gen.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/fcafdd77ae214ec77eaa845a63b6e531e08964c8/tensor2tensor%2Fmodels%2Fresearch%2Fbasic_conv_gen.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fmodels%2Fresearch%2Fbasic_conv_gen.py?ref=fcafdd77ae214ec77eaa845a63b6e531e08964c8", "patch": "@@ -76,7 +76,7 @@ def body(self, features):\n x = common_layers.layer_norm(x)\n \n # Add embedded action.\n- action = tf.reshape(features[\"input_action\"][:, 1, :],\n+ action = tf.reshape(features[\"input_action\"][:, -1, :],\n [-1, 1, 1, hparams.hidden_size])\n action_mask = tf.layers.dense(action, filters, name=\"action_mask\")\n zeros_mask = tf.zeros(common_layers.shape_list(x)[:-1] + [filters]," } ]
tensor2tensor
699ff6ac7a3522bfbe36561b35e8aba5bb01fa24
9a27a55dedbd07e77754039573a2cd1a178da580
tensor2tensor/rl/envs/simulated_batch_env.py
https://github.com/medicode/tensor2tensor
true
false
true
@@ -93,7 +93,7 @@ class SimulatedBatchEnv(InGraphBatchEnv): # Ancient method for environments not supporting get_starting_data # This is probably not compatibile with NUMBER_OF_HISTORY_FRAMES!=2 # Should be removed at some point - num_frames = 2 + num_frames = SimulatedBatchEnv.NUMBER_OF_HISTORY_FRAMES initialization_env.reset() skip_frames = 20 for _ in range(skip_frames):
num_frames = 2
num_frames = SimulatedBatchEnv . NUMBER_OF_HISTORY_FRAMES
SINGLE_STMT
[["Insert", ["assignment", 3, 7, 3, 21], ["attribute", "N0"], 2], ["Insert", "N0", ["identifier:SimulatedBatchEnv", "T"], 0], ["Insert", "N0", [".:.", "T"], 1], ["Insert", "N0", ["identifier:NUMBER_OF_HISTORY_FRAMES", "T"], 2], ["Delete", ["integer:2", 3, 20, 3, 21]]]
medicode/tensor2tensor@699ff6ac7a3522bfbe36561b35e8aba5bb01fa24
small fix
[ { "sha": "732d336f623e668b290952569bb80d1e9bfe7b4a", "filename": "tensor2tensor/rl/envs/simulated_batch_env.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/699ff6ac7a3522bfbe36561b35e8aba5bb01fa24/tensor2tensor%2Frl%2Fenvs%2Fsimulated_batch_env.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/699ff6ac7a3522bfbe36561b35e8aba5bb01fa24/tensor2tensor%2Frl%2Fenvs%2Fsimulated_batch_env.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Frl%2Fenvs%2Fsimulated_batch_env.py?ref=699ff6ac7a3522bfbe36561b35e8aba5bb01fa24", "patch": "@@ -93,7 +93,7 @@ def __init__(self, environment_lambda, length):\n # Ancient method for environments not supporting get_starting_data\n # This is probably not compatibile with NUMBER_OF_HISTORY_FRAMES!=2\n # Should be removed at some point\n- num_frames = 2\n+ num_frames = SimulatedBatchEnv.NUMBER_OF_HISTORY_FRAMES\n initialization_env.reset()\n skip_frames = 20\n for _ in range(skip_frames):" } ]
tensor2tensor
6ad82d4001145348922e915d383e375c833a929c
d4ff56cbaab4cb01684b4e120a9c92bf449ace06
tensor2tensor/utils/t2t_model.py
https://github.com/medicode/tensor2tensor
true
false
false
@@ -665,7 +665,7 @@ class T2TModel(base.Layer): if top_beams == 1: samples = ids[:, 0, 1:] else: - samples = ids[:, :top_beams, 1] + samples = ids[:, :top_beams, 1:] return {"outputs": samples, "scores": scores}
samples = ids [ : , : top_beams , 1 ]
samples = ids [ : , : top_beams , 1 : ]
SINGLE_STMT
[["Insert", ["subscript", 3, 17, 3, 38], ["slice", "N0"], 6], ["Move", "N0", ["integer:1", 3, 36, 3, 37], 0], ["Insert", "N0", [":::", "T"], 1]]
medicode/tensor2tensor@6ad82d4001145348922e915d383e375c833a929c
Bug if top_beams > 1 If top_beams > 1 (return_beams=True) only the first word of every beam was returned. Bug introduced with last change of this line.
[ { "sha": "068de48df2d2d6fa13a8408c036ea681e23139ad", "filename": "tensor2tensor/utils/t2t_model.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/6ad82d4001145348922e915d383e375c833a929c/tensor2tensor%2Futils%2Ft2t_model.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/6ad82d4001145348922e915d383e375c833a929c/tensor2tensor%2Futils%2Ft2t_model.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Futils%2Ft2t_model.py?ref=6ad82d4001145348922e915d383e375c833a929c", "patch": "@@ -665,7 +665,7 @@ def symbols_to_logits_fn(ids):\n if top_beams == 1:\n samples = ids[:, 0, 1:]\n else:\n- samples = ids[:, :top_beams, 1]\n+ samples = ids[:, :top_beams, 1:]\n \n return {\"outputs\": samples, \"scores\": scores}\n " } ]
tensor2tensor
9d943d547aba09d31a8b864266e0bdf7d5560558
b15dacdcc2c48d6ba267b75e2a58cba0deca93d1
tensor2tensor/utils/t2t_model.py
https://github.com/medicode/tensor2tensor
true
false
true
@@ -1073,7 +1073,7 @@ class T2TModel(base.Layer): if not hasattr(hparams, "problem"): raise NotImplementedError(_no_problem_err("estimator_spec_eval")) - problem = hparams.problem_instances[0] or hparams.problem + problem = get_problem_from_hparams(hparams) if common_layers.is_on_tpu(): # Fathom assert False, 'Not supporting TPUs yet'
problem = hparams . problem_instances [ 0 ] or hparams . problem
problem = get_problem_from_hparams ( hparams )
SINGLE_STMT
[["Insert", ["assignment", 3, 5, 3, 62], ["call", "N0"], 2], ["Update", ["identifier:hparams", 3, 15, 3, 22], "get_problem_from_hparams"], ["Move", "N0", ["identifier:hparams", 3, 15, 3, 22], 0], ["Insert", "N0", ["argument_list", "N1"], 1], ["Insert", "N1", ["(:(", "T"], 0], ["Move", "N1", ["identifier:hparams", 3, 47, 3, 54], 1], ["Insert", "N1", ["):)", "T"], 2], ["Delete", [".:.", 3, 22, 3, 23]], ["Delete", ["identifier:problem_instances", 3, 23, 3, 40]], ["Delete", ["attribute", 3, 15, 3, 40]], ["Delete", ["[:[", 3, 40, 3, 41]], ["Delete", ["integer:0", 3, 41, 3, 42]], ["Delete", ["]:]", 3, 42, 3, 43]], ["Delete", ["subscript", 3, 15, 3, 43]], ["Delete", ["or:or", 3, 44, 3, 46]], ["Delete", [".:.", 3, 54, 3, 55]], ["Delete", ["identifier:problem", 3, 55, 3, 62]], ["Delete", ["attribute", 3, 47, 3, 62]], ["Delete", ["boolean_operator", 3, 15, 3, 62]]]
medicode/tensor2tensor@9d943d547aba09d31a8b864266e0bdf7d5560558
track down remaining multitask issue
[ { "sha": "799c1d470330e60e817847ffeefc5c241cfdcc2c", "filename": "tensor2tensor/utils/t2t_model.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/9d943d547aba09d31a8b864266e0bdf7d5560558/tensor2tensor%2Futils%2Ft2t_model.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/9d943d547aba09d31a8b864266e0bdf7d5560558/tensor2tensor%2Futils%2Ft2t_model.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Futils%2Ft2t_model.py?ref=9d943d547aba09d31a8b864266e0bdf7d5560558", "patch": "@@ -1073,7 +1073,7 @@ def estimator_spec_eval(self, features, logits, labels, loss, losses_dict):\n if not hasattr(hparams, \"problem\"):\n raise NotImplementedError(_no_problem_err(\"estimator_spec_eval\"))\n \n- problem = hparams.problem_instances[0] or hparams.problem\n+ problem = get_problem_from_hparams(hparams)\n if common_layers.is_on_tpu():\n # Fathom\n assert False, 'Not supporting TPUs yet'" } ]
tensor2tensor
cf5f4f6dfc08d14c5115d57d6f4e53119f98dd26
d50ec62697902a2b641a0d15a99d6818286a9034
tensor2tensor/utils/decoding.py
https://github.com/medicode/tensor2tensor
true
false
false
@@ -160,7 +160,7 @@ def decode_from_dataset(estimator, output_file = tf.gfile.Open(output_filepath, "w") target_file = tf.gfile.Open(target_filepath, "w") - problem_hparams = hparams.problems[problem_idx] + problem_hparams = hparams.problem_hparams # Inputs vocabulary is set to targets if there are no inputs in the problem, # e.g., for language models where the inputs are just a prefix of targets. has_input = "inputs" in problem_hparams.vocabulary
problem_hparams = hparams . problems [ problem_idx ]
problem_hparams = hparams . problem_hparams
SINGLE_STMT
[["Move", ["assignment", 3, 3, 3, 50], ["attribute", 3, 21, 3, 37], 2], ["Update", ["identifier:problems", 3, 29, 3, 37], "problem_hparams"], ["Delete", ["[:[", 3, 37, 3, 38]], ["Delete", ["identifier:problem_idx", 3, 38, 3, 49]], ["Delete", ["]:]", 3, 49, 3, 50]], ["Delete", ["subscript", 3, 21, 3, 50]]]
medicode/tensor2tensor@cf5f4f6dfc08d14c5115d57d6f4e53119f98dd26
fix-problem-hparams
[ { "sha": "e4e09b710421feb7b1ff35ec2108c466f65e2281", "filename": "tensor2tensor/utils/decoding.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/cf5f4f6dfc08d14c5115d57d6f4e53119f98dd26/tensor2tensor%2Futils%2Fdecoding.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/cf5f4f6dfc08d14c5115d57d6f4e53119f98dd26/tensor2tensor%2Futils%2Fdecoding.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Futils%2Fdecoding.py?ref=cf5f4f6dfc08d14c5115d57d6f4e53119f98dd26", "patch": "@@ -160,7 +160,7 @@ def decode_from_dataset(estimator,\n output_file = tf.gfile.Open(output_filepath, \"w\")\n target_file = tf.gfile.Open(target_filepath, \"w\")\n \n- problem_hparams = hparams.problems[problem_idx]\n+ problem_hparams = hparams.problem_hparams\n # Inputs vocabulary is set to targets if there are no inputs in the problem,\n # e.g., for language models where the inputs are just a prefix of targets.\n has_input = \"inputs\" in problem_hparams.vocabulary" } ]
tensor2tensor
5840d5ba2c0e2f2a1dd0e6a717f190940151f885
2ba2dae062433b6adb618b7e432f25e496e9c1ed
tensor2tensor/utils/optimize.py
https://github.com/medicode/tensor2tensor
true
false
true
@@ -44,7 +44,7 @@ def optimize(loss, learning_rate, hparams, use_tpu=False): opt = tf.contrib.tpu.CrossShardOptimizer(opt) tf.summary.scalar("learning_rate", learning_rate) - opt_summaries = ["loss", "global_gradient_norm"] + opt_summaries = ["loss"] if hparams.summarize_grads: tf.logging.info("Summarizing gradients") opt_summaries.extend(["gradients", "gradient_norm", "global_gradient_norm"])
opt_summaries = [ "loss" , "global_gradient_norm" ]
opt_summaries = [ "loss" ]
SINGLE_STMT
[["Delete", [",:,", 3, 26, 3, 27]], ["Delete", ["string:\"global_gradient_norm\"", 3, 28, 3, 50]]]
medicode/tensor2tensor@5840d5ba2c0e2f2a1dd0e6a717f190940151f885
fix grad summaries
[ { "sha": "9ddfe699ae326a51d83da5b1790887dcffeeefa4", "filename": "tensor2tensor/utils/optimize.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/5840d5ba2c0e2f2a1dd0e6a717f190940151f885/tensor2tensor%2Futils%2Foptimize.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/5840d5ba2c0e2f2a1dd0e6a717f190940151f885/tensor2tensor%2Futils%2Foptimize.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Futils%2Foptimize.py?ref=5840d5ba2c0e2f2a1dd0e6a717f190940151f885", "patch": "@@ -44,7 +44,7 @@ def optimize(loss, learning_rate, hparams, use_tpu=False):\n opt = tf.contrib.tpu.CrossShardOptimizer(opt)\n \n tf.summary.scalar(\"learning_rate\", learning_rate)\n- opt_summaries = [\"loss\", \"global_gradient_norm\"]\n+ opt_summaries = [\"loss\"]\n if hparams.summarize_grads:\n tf.logging.info(\"Summarizing gradients\")\n opt_summaries.extend([\"gradients\", \"gradient_norm\", \"global_gradient_norm\"])" } ]
tensor2tensor
74ff7072c069b01cfa85bad7da7bf6770fd2802f
f8cf3582d2b2f7a30e69f02a31da2acd44e7f7c6
tensor2tensor/models/transformer.py
https://github.com/medicode/tensor2tensor
true
false
true
@@ -1422,7 +1422,7 @@ def transformer_base_range(rhp): rhp.set_float("initializer_gain", 0.5, 2.0) rhp.set_float("optimizer_adam_beta1", 0.85, 0.95) rhp.set_float("optimizer_adam_beta2", 0.97, 0.99) - rhp.set_float("weight_decay", 0.0, 2.0) + rhp.set_float("weight_decay", 0.0, 1e-4) @registry.register_hparams
rhp . set_float ( "weight_decay" , 0.0 , 2.0 )
rhp . set_float ( "weight_decay" , 0.0 , 1e-4 )
CHANGE_NUMERIC_LITERAL
[["Update", ["float:2.0", 3, 38, 3, 41], "1e-4"]]
medicode/tensor2tensor@74ff7072c069b01cfa85bad7da7bf6770fd2802f
Change weight_decay range in hparams transformer_base_range because the default is quite large. PiperOrigin-RevId: 199575307
[ { "sha": "1c7ca455377c8e12c7d930e1bf5f244505a7ed9b", "filename": "tensor2tensor/models/transformer.py", "status": "modified", "additions": 1, "deletions": 1, "changes": 2, "blob_url": "https://github.com/medicode/tensor2tensor/blob/74ff7072c069b01cfa85bad7da7bf6770fd2802f/tensor2tensor%2Fmodels%2Ftransformer.py", "raw_url": "https://github.com/medicode/tensor2tensor/raw/74ff7072c069b01cfa85bad7da7bf6770fd2802f/tensor2tensor%2Fmodels%2Ftransformer.py", "contents_url": "https://api.github.com/repos/medicode/tensor2tensor/contents/tensor2tensor%2Fmodels%2Ftransformer.py?ref=74ff7072c069b01cfa85bad7da7bf6770fd2802f", "patch": "@@ -1422,7 +1422,7 @@ def transformer_base_range(rhp):\n rhp.set_float(\"initializer_gain\", 0.5, 2.0)\n rhp.set_float(\"optimizer_adam_beta1\", 0.85, 0.95)\n rhp.set_float(\"optimizer_adam_beta2\", 0.97, 0.99)\n- rhp.set_float(\"weight_decay\", 0.0, 2.0)\n+ rhp.set_float(\"weight_decay\", 0.0, 1e-4)\n \n \n @registry.register_hparams" } ]