VictorSanh committed
Commit • 89fbe42
1 Parent(s): e3e6197
final touches
P3.py CHANGED
@@ -57,7 +57,7 @@ def load_cached_task(features_dict, tfrecord):
         feat: _feature_config(**desc) for feat, desc in features_dict.items()
     }

-    ds = tf.data.TFRecordDataset(tf.io.gfile.glob([tfrecord])) #TODO handle multiple shards
+    ds = tf.data.TFRecordDataset(tf.io.gfile.glob([tfrecord]))  # TODO -> handle multiple shards
     ds = ds.map(
         lambda pb: tf.io.parse_single_example(pb, feature_description),
         num_parallel_calls=tf.data.experimental.AUTOTUNE
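
Aside (not part of the commit): a minimal, self-contained sketch of the read path this hunk touches, using a throwaway file path and a toy feature spec; in P3.py the real feature_description is built by _feature_config from the hub metadata, not the one-feature dict assumed here.

import tensorflow as tf

path = "/tmp/example.tfrecord-00000-of-00001"  # illustrative single-shard name

# Write one serialized tf.train.Example with a single int64-list feature.
with tf.io.TFRecordWriter(path) as writer:
    ex = tf.train.Example(features=tf.train.Features(feature={
        "inputs": tf.train.Feature(int64_list=tf.train.Int64List(value=[1, 2, 3])),
    }))
    writer.write(ex.SerializeToString())

feature_description = {
    "inputs": tf.io.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
}

# Same pattern as the diff: glob the shard(s), then parse each record.
ds = tf.data.TFRecordDataset(tf.io.gfile.glob([path]))
ds = ds.map(
    lambda pb: tf.io.parse_single_example(pb, feature_description),
    num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
for record in ds:
    print(record["inputs"].numpy())  # -> [1 2 3]

Since only a single shard exists per split (the script asserts as much further down), the glob currently matches exactly one file, which is why the TODO about multiple shards recurs.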
@@ -85,16 +85,13 @@ def find_task_splits_and_features_dict():
     """Get the task available (list was pre-computed by `print_data_split_sizes.py`), and get the features for each task."""
     task_splits_and_features = defaultdict(dict)

-
-
-
-
-
-
-    for task_name, split_sizes in data:
-        if "adversarial_qa" not in task_name: #TODO remove
-            continue
+    data_split_sizes = read_from_url(f"{_HUB_PATH}/data_split_sizes.csv")
+    data_split_sizes = [t.strip() for t in data_split_sizes.splitlines()]
+    data_split_sizes = data_split_sizes[1:]
+    data_split_sizes = [t.split("|") for t in data_split_sizes]
+    data_split_sizes = [(t[0], json.loads(t[1])) for t in data_split_sizes]

+    for task_name, split_sizes in data_split_sizes:
         for split_name in split_sizes.keys():
             split_info = json.loads(
                 read_from_url(
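
Aside (not part of the commit): the five added lines parse a header-prefixed, pipe-delimited file whose second column is a JSON dict of split sizes. A standalone sketch of that exact pipeline on a fabricated two-row sample; the real rows live at f"{_HUB_PATH}/data_split_sizes.csv" and their header and contents are not shown in this diff.

import json

sample = (
    "task|split_sizes\n"  # fabricated header row
    "some_task|{\"train\": 10000, \"validation\": 1000}\n"  # fabricated data row
)

data_split_sizes = [t.strip() for t in sample.splitlines()]
data_split_sizes = data_split_sizes[1:]                      # drop the header row
data_split_sizes = [t.split("|") for t in data_split_sizes]  # pipe-delimited columns
data_split_sizes = [(t[0], json.loads(t[1])) for t in data_split_sizes]

for task_name, split_sizes in data_split_sizes:
    print(task_name, sorted(split_sizes))  # -> some_task ['train', 'validation']

This also removes the old debugging filter that skipped every task except the adversarial_qa ones, so the loader now enumerates all tasks listed in the CSV.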
@@ -102,7 +99,7 @@ def find_task_splits_and_features_dict():
                 )
             )
             features_dict = split_info["features"]
-            assert split_info["num_shards"] == 1 #TODO ->
+            assert split_info["num_shards"] == 1  # TODO -> handle multiple shards

             if not task_splits_and_features[task_name]:
                 task_splits_and_features[task_name] = {
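
Aside (not part of the commit): the assert pins num_shards to 1, matching the hard-coded tfrecord-00000-of-00001 suffix in the next hunk. A hypothetical sketch of what resolving the recurring "handle multiple shards" TODO might look like, spelling out the standard sharded-TFRecord naming scheme with dummy values; none of this code is in the commit.

num_shards = 4  # the script currently asserts split_info["num_shards"] == 1
data_path, task_name, split_name = "https://example.org/P3/data", "some_task", "train"

shard_paths = [
    f"{data_path}/{task_name}/{split_name}.tfrecord-{i:05d}-of-{num_shards:05d}"
    for i in range(num_shards)
]
print(shard_paths[0])   # .../train.tfrecord-00000-of-00004
print(shard_paths[-1])  # .../train.tfrecord-00003-of-00004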
@@ -119,7 +116,7 @@ _TASK_SPLITS_AND_FEATURES_DICT = find_task_splits_and_features_dict()
 _URLs = {
     task_name: {
         split_name: {
-            "tfrecord": f"{_DATA_PATH}/{task_name}/{split_name}.tfrecord-00000-of-00001",
+            "tfrecord": f"{_DATA_PATH}/{task_name}/{split_name}.tfrecord-00000-of-00001",  # TODO -> handle multiple shards
         }
         for split_name in splits_and_features_dict["splits"]
     }
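
Aside (not part of the commit): the hunk shows only the inner comprehensions of _URLs; the outer loop over _TASK_SPLITS_AND_FEATURES_DICT is implied by the surrounding context. A dummy-data sketch of the structure it builds, with a hypothetical _DATA_PATH value (the real one is defined elsewhere in P3.py):

_DATA_PATH = "https://example.org/P3/data"  # hypothetical stand-in
_TASK_SPLITS_AND_FEATURES_DICT = {"some_task": {"splits": ["train", "validation"]}}

_URLs = {
    task_name: {
        split_name: {
            "tfrecord": f"{_DATA_PATH}/{task_name}/{split_name}.tfrecord-00000-of-00001",
        }
        for split_name in splits_and_features_dict["splits"]
    }
    for task_name, splits_and_features_dict in _TASK_SPLITS_AND_FEATURES_DICT.items()
}

print(_URLs["some_task"]["train"]["tfrecord"])
# -> https://example.org/P3/data/some_task/train.tfrecord-00000-of-00001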