Datasets: conceptofmind
conceptofmind committed
Commit • f53e412
Parent(s): 5e630a4
Create megawika.py
Files changed: megawika.py (+281, -0)
megawika.py ADDED
@@ -0,0 +1,281 @@
# Copyright 2020 The HuggingFace Datasets Authors and
# the Johns Hopkins University (JHU) Human Language Technology
# Center of Excellence.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file provides a HuggingFace dataset loader implementation for
the JHU/HLTCOE MegaWika dataset.

MegaWika is a multi- and crosslingual text dataset containing 30 million
Wikipedia passages with their scraped and cleaned web citations. The
passages span 50 Wikipedias in 50 languages, and the articles in which
the passages were originally embedded are included for convenience. Where
a Wikipedia passage is in a non-English language, an automated English
translation is provided. Furthermore, nearly 130 million English
question/answer pairs were extracted from the passages, and FrameNet events
occurring in the passages are detected using the LOME FrameNet parser.
"""
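# A minimal usage sketch (illustrative, not part of the loader itself: it
# assumes this script is published under the "conceptofmind/MegaWika" repo
# id referenced below). Pass a language code as the config name, or "all"
# to load every language:
#
#     from datasets import load_dataset
#     ds = load_dataset("conceptofmind/MegaWika", "en")
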
import json
import urllib.request

import yaml

import datasets

# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@article{barham2023megawika,
  title={MegaWika: Millions of reports and their sources across 50 diverse languages},
  author={Barham, Samuel and Weller, Orion and
          Yuan, Michelle and Murray, Kenton and
          Yarmohammadi, Mahsa and Jiang, Zhengping and
          Vashishtha, Siddharth and Martin, Alexander and
          Liu, Anqi and White, Aaron Steven and
          Boyd-Graber, Jordan and Van Durme, Benjamin},
  journal={INSERT ARXIV PREPRINT ID HERE},
  year={2023}
}
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
MegaWika is a multi- and crosslingual text dataset containing 30 million
Wikipedia passages with their scraped and cleaned web citations. The
passages span 50 Wikipedias in 50 languages, and the articles in which
the passages were originally embedded are included for convenience. Where
a Wikipedia passage is in a non-English language, an automated English
translation is provided. Furthermore, nearly 130 million English
question/answer pairs were extracted from the passages, and FrameNet events
occurring in the passages are detected using the LOME FrameNet parser.
"""

_HOMEPAGE = "https://huggingface.co/datasets/conceptofmind/MegaWika"

_LICENSE = "cc-by-sa-4.0"

_URL = "https://huggingface.co/datasets/conceptofmind/MegaWika"

# Load the file paths for all the splits (per language currently)
file_list_url = "https://huggingface.co/datasets/conceptofmind/MegaWika/raw/main/files.yml"

with urllib.request.urlopen(file_list_url) as f:
    try:
        fnames = yaml.safe_load(f)
    except yaml.YAMLError as exc:
        # Raising is friendlier than print() + exit(1) inside an importable module.
        raise RuntimeError("Error loading the file paths for the dataset splits.") from exc

_DATA_URL = fnames['fnames']

_VARIANTS = ["all"] + list(_DATA_URL.keys())

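# The YAML manifest is expected to map each language code to its list of
# shard URLs. A hypothetical (illustrative, not verbatim) files.yml:
#
#     fnames:
#       en:
#         - https://huggingface.co/datasets/conceptofmind/MegaWika/resolve/main/data/en/...
#       de:
#         - https://huggingface.co/datasets/conceptofmind/MegaWika/resolve/main/data/de/...
#
# which would make _VARIANTS == ["all", "en", "de"].
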

class MegaWika(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [datasets.BuilderConfig(name) for name in _VARIANTS]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "article_title": datasets.Value("string"),
                    "article_text": datasets.Value("string"),
                    "entries": datasets.features.Sequence(
                        {
                            "id": datasets.Value("string"),

                            # Wiki passage
                            "passage": {
                                "text": [datasets.Value("string")],
                                "parse": datasets.Value("string"),
                                "en_tokens": [datasets.Value("string")],
                                "lang_tokens": [datasets.Value("string")],
                                "en_lang_token_map": [[datasets.Value("int32")]]  # list of pairs
                            },

                            # MT
                            "mt": {
                                "original": datasets.Value("string"),
                                "original_sents": [datasets.Value("string")],
                                "translation": datasets.Value("string"),
                                "translation_sents": [datasets.Value("string")],
                                "translation_probs": [[datasets.Value("string")]],
                                "repetitious_translation": datasets.Value("bool")
                            },

                            # Source document
                            "source_lang": datasets.Value("string"),
                            "source_url": datasets.Value("string"),
                            "source_text": datasets.Value("string"),

                            # Question/answer pairs
                            "qa_pairs": datasets.Sequence(
                                {
                                    "question": datasets.Value("string"),
                                    "en_answer": datasets.Value("string"),
                                    "lang_answer": datasets.Value("string"),
                                    "frames": datasets.Sequence(
                                        {
                                            "frame": datasets.Value("string"),
                                            "argument": datasets.Value("string")
                                        }
                                    ),
                                    "en_matches_in_source": [[datasets.Value("int32")]],  # list of pairs of int indices
                                    "en_match_in_passage": [datasets.Value("int32")],  # pair of int indices
                                    "lang_matches_in_source": [[datasets.Value("int32")]],  # list of pairs of int indices
                                    "lang_match_in_passage": [datasets.Value("int32")],  # pair of int indices
                                    "passage": [datasets.Value("string")],
                                    "en_answer_tokens": [datasets.Value("string")],
                                    "match_disambiguated_question": datasets.Value("string"),
                                }
                            )
                        }
                    )
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        if self.config.name == "all":
            data_sources = _DATA_URL
        else:
            data_sources = {self.config.name: _DATA_URL[self.config.name]}

        return [
            datasets.SplitGenerator(
                name=lang,
                gen_kwargs={
                    "filepaths": dl_manager.download(data_sources[lang])
                }
            )
            for lang in data_sources
        ]

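    # Each language config therefore exposes a single split named after the
    # language code itself (config "en" -> split "en"); the "all" config
    # yields one split per language listed in files.yml.
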
    def _get_qa_pair_list_features(self, qa_pair, feature_name):
        res = []

        if feature_name in qa_pair:
            if qa_pair[feature_name]:
                return qa_pair[feature_name]
        elif feature_name.startswith('en'):
            feature_name = '_'.join(feature_name.split('_')[1:])
            return self._get_qa_pair_list_features(qa_pair, feature_name)

        return res

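    # Worked example (hypothetical record): given
    #     qa_pair = {"matches_in_source": [[0, 4]]}
    # a lookup for "en_matches_in_source" finds no prefixed key, so the
    # "en_" prefix is stripped and the recursive call returns [[0, 4]]
    # from the legacy un-prefixed key.
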
    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form by iterating on all the files."""
        id_ = 0
        for filepath in filepaths:
            # logger.info("Generating examples from = %s", filepath)
            try:
                with open(filepath, "r", encoding="utf-8") as f:
                    for line in f:
                        if line:
                            example = json.loads(line)
                            if example is not None and isinstance(example, dict):
                                yield id_, {
                                    "article_title": example.get("article_title", ""),
                                    "article_text": example.get("article_text", ""),
                                    "entries": [
                                        {
                                            "id": entry.get("id", "").lower(),
                                            "passage": {
                                                "text": entry['passage'].get("text", []),
                                                "parse": json.dumps(entry['passage'].get("parse", [{}])),
                                                "en_tokens": list(entry['passage'].get(
                                                    "en_tokens",
                                                    {
                                                        token: token
                                                        for tokens in entry['passage'].get("tokens", {})
                                                        for token in tokens
                                                    }
                                                ).values()),
                                                "lang_tokens": list(entry['passage'].get("lang_tokens", {}).values()),
                                                "en_lang_token_map": [
                                                    (int(item[0]), int(item[1]))
                                                    for item in entry['passage'].get("en_lang_token_map", {}).items()
                                                ]
                                            },
                                            "mt": {
                                                "original": entry.get("original", ""),
                                                "original_sents": entry.get("original_sents", []),
                                                "translation": entry.get("translation", ""),
                                                "translation_sents": entry.get("translation_sents", []),
                                                "translation_probs": entry.get("translation_probs", [[]]),
                                                "repetitious_translation": entry.get("repetitious_translation", None)
                                            },
                                            "source_lang": entry.get("source_lang", ""),
                                            "source_url": entry.get("source_url", ""),
                                            "source_text": entry.get("source_text", ""),
                                            "qa_pairs": [
                                                {
                                                    "question": qa_pair.get("question", ""),
                                                    "en_answer": qa_pair.get("en_answer", qa_pair.get("answer", "")),
                                                    "lang_answer": qa_pair.get("lang_answer", ""),
                                                    "frames": qa_pair.get("frames", []),
                                                    "en_matches_in_source": self._get_qa_pair_list_features(qa_pair, "en_matches_in_source"),
                                                    "en_match_in_passage": self._get_qa_pair_list_features(qa_pair, "en_match_in_passage"),
                                                    "lang_matches_in_source": self._get_qa_pair_list_features(qa_pair, "lang_matches_in_source"),
                                                    "lang_match_in_passage": self._get_qa_pair_list_features(qa_pair, "lang_match_in_passage"),
                                                    "passage": qa_pair.get("passage", []),
                                                    "en_answer_tokens": qa_pair.get("en_answer_tokens", qa_pair.get("answer_tokens", [])),
                                                    "match_disambiguated_question": qa_pair.get("match_disambiguated_question", ""),
                                                }
                                                for qa_pair in entry.get("qa_pairs", [])
                                            ]
                                        }
                                        for entry in example.get("entries", [])
                                    ]
                                }
                                id_ += 1
            except Exception:
                # Narrowed from a bare except; skip unreadable shards instead of aborting.
                print("Error reading file:", filepath)
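For completeness, a short consumption sketch (illustrative only: it assumes
the loader above is reachable on the Hub as "conceptofmind/MegaWika" and
that the shard URLs in files.yml can be streamed):

from datasets import load_dataset

# Stream the English config so shards are read lazily rather than
# downloaded up front.
ds = load_dataset("conceptofmind/MegaWika", "en", streaming=True)
first = next(iter(ds["en"]))

print(first["article_title"])
# Sequence-of-dict features come back as a dict of lists, so the source
# URLs for every entry of the article sit under a single key.
print(first["entries"]["source_url"][:3])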