Commit: Update xor-tydi.py
File changed: xor-tydi.py (+9 −6)
@@ -17,6 +17,9 @@  (old side)
17   """Wikipedia NQ dataset."""
18
19   import json
20
21   import datasets
22
@@ -113,14 +116,14 @@ class XORTyDi(datasets.GeneratorBasedBuilder):  (old side)
113      # data['answers'] = []
114      # return data['query_id'], data
115
116 -def process_train_entry(data):
117      positive_ctxs = data["positive_ctxs"]
118      hard_negative_ctxs = data["hard_negative_ctxs"]
119      # each ctx: {'title':... , 'text': ....}
120
121 -
122 -    return
123 -        "query_id":
124          "query": data["question"],
125          "answers": data.get("answers", []),
126          "positive_passages": [{**doc, 'docid': f'pos-{i}-{random.randint()}'} for i, doc in enumerate(positive_ctxs)],
@@ -148,6 +151,6 @@ class XORTyDi(datasets.GeneratorBasedBuilder):  (old side)
148          else:
149              with open(filepath, encoding="utf-8") as f:
150                  all_data = json.load(f)
151 -                for data in all_data:
152 -                    yield process_train_entry(data)
153
(new side)
17   """Wikipedia NQ dataset."""
18
19   import json
20  +import random
21  +
22  +random.seed(42)
23
24   import datasets
25
(new side)
116      # data['answers'] = []
117      # return data['query_id'], data
118
119 +def process_train_entry(data, _id):
120      positive_ctxs = data["positive_ctxs"]
121      hard_negative_ctxs = data["hard_negative_ctxs"]
122      # each ctx: {'title':... , 'text': ....}
123
124 +    _id = f"{_id}-{random.randint()}"
125 +    return _id, {
126 +        "query_id": _id,
127          "query": data["question"],
128          "answers": data.get("answers", []),
129          "positive_passages": [{**doc, 'docid': f'pos-{i}-{random.randint()}'} for i, doc in enumerate(positive_ctxs)],
(new side)
151          else:
152              with open(filepath, encoding="utf-8") as f:
153                  all_data = json.load(f)
154 +                for i, data in enumerate(all_data):
155 +                    yield process_train_entry(data, i)
156