conceptofmind committed
Commit 37af255 · verified · 1 Parent(s): 07118e2

Update megawika.py

Files changed (1):
  1. megawika.py +79 -151
megawika.py CHANGED
@@ -1,105 +1,66 @@
- # Copyright 2020 The HuggingFace Datasets Authors and
- # the Johns Hopkins University (JHU) Human Language Technology
- # Center of Excellence.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """
- This file provides a HuggingFace dataset loader implementation for
- the JHU/HLTCOE MegaWika dataset.
- MegaWika is a multi- and crosslingual text dataset containing 30 million
- Wikipedia passages with their scraped and cleaned web citations. The
- passages span 50 Wikipedias in 50 languages, and the articles in which
- the passages were originally embedded are included for convenience. Where
- a Wikipedia passage is in a non-English language, an automated English
- translation is provided. Furthermore, nearly 130 million English
- question/answer pairs were extracted from the passages, and FrameNet events
- occurring in the passages are detected using the LOME FrameNet parser.
- """
-
-
- import csv
  import json
- import os
- import re
- import pathlib
- from pathlib import Path
  import yaml
- from ast import literal_eval
-
- import datasets
-
- # import gzip
- # try:
- #     import lzma as xz
- # except ImportError:
- #     import pylzma as xz
 
 
- # TODO: Add BibTeX citation
- # Find for instance the citation on arxiv or on the dataset repo/website
  _CITATION = """\
  @article{barham2023megawika,
      title={MegaWika: Millions of reports and their sources across 50 diverse languages},
-     author={Barham, Samuel and Weller, Orion and
-             Yuan, Michelle and Murray, Kenton and
-             Yarmohammadi, Mahsa and Jiang, Zhengping and
-             Vashishtha, Siddharth and Martin, Alexander and
-             Liu, Anqi and White, Aaron Steven and
-             Boyd-Graber, Jordan and Van Durme, Benjamin
-     },
      journal={INSERT ARXIV PREPRINT ID HERE},
      year={2023}
- }
- """
-
- # TODO: Add description of the dataset here
- # You can copy an official description
- _DESCRIPTION = """\
- MegaWika is a multi- and crosslingual text dataset containing 30 million
- Wikipedia passages with their scraped and cleaned web citations. The
- passages span 50 Wikipedias in 50 languages, and the articles in which
- the passages were originally embedded are included for convenience. Where
- a Wikipedia passage is in a non-English language, an automated English
- translation is provided. Furthermore, nearly 130 million English
- question/answer pairs were extracted from the passages, and FrameNet events
- occurring in the passages are detected using the LOME FrameNet parser.
- """
 
  _HOMEPAGE = "https://huggingface.co/datasets/conceptofmind/MegaWika"
-
  _LICENSE = "cc-by-sa-4.0"
 
- _URL = "https://huggingface.co/datasets/conceptofmind/MegaWika"
-
- # Load the file paths for all the splits (per language currently)
-
  file_list_url = "https://huggingface.co/datasets/conceptofmind/MegaWika/raw/main/files.yml"
 
- import urllib.request
- with urllib.request.urlopen(file_list_url) as f:
-     try:
-         fnames = yaml.safe_load(f)
-     except yaml.YAMLError as exc:
-         print("Error loading the file paths for the dataset splits. Aborting.")
-         exit(1)
-
- _DATA_URL = fnames['fnames']
-
- _VARIANTS = ["all"] + list(_DATA_URL.keys())
-
 
  class MegaWika(datasets.GeneratorBasedBuilder):
-     BUILDER_CONFIGS = [datasets.BuilderConfig(name) for name in _VARIANTS]
 
      def _info(self):
          return datasets.DatasetInfo(
@@ -111,17 +72,13 @@ class MegaWika(datasets.GeneratorBasedBuilder):
                  "entries": datasets.features.Sequence(
                      {
                          "id": datasets.Value("string"),
-
-                         # Wiki passage
                          "passage": {
                              "text": [datasets.Value("string")],
                              "parse": datasets.Value("string"),
                              "en_tokens": [datasets.Value("string")],
                              "lang_tokens": [datasets.Value("string")],
-                             "en_lang_token_map": [[datasets.Value("int32")]]  # list of pairs
                          },
-
-                         # MT
                          "mt": {
                              "original": datasets.Value("string"),
                              "original_sents": [datasets.Value("string")],
@@ -130,13 +87,9 @@ class MegaWika(datasets.GeneratorBasedBuilder):
                              "translation_probs": [[datasets.Value("string")]],
                              "repetitious_translation": datasets.Value("bool")
                          },
-
-                         # Source document
                          "source_lang": datasets.Value("string"),
                          "source_url": datasets.Value("string"),
                          "source_text": datasets.Value("string"),
-
-                         # Question/answer pairs
                          "qa_pairs": datasets.Sequence(
                              {
                                  "question": datasets.Value("string"),
@@ -148,10 +101,10 @@ class MegaWika(datasets.GeneratorBasedBuilder):
                                          "argument": datasets.Value("string")
                                      }
                                  ),
-                                 "en_matches_in_source": [[datasets.Value("int32")]],  # list of pair of int indices
-                                 "en_match_in_passage": [datasets.Value("int32")],  # pair of int indices
-                                 "lang_matches_in_source": [[datasets.Value("int32")]],  # list of pair of int indices
-                                 "lang_match_in_passage": [datasets.Value("int32")],  # pair of int indices
                                  "passage": [datasets.Value("string")],
                                  "en_answer_tokens": [datasets.Value("string")],
                                  "match_disambiguated_question": datasets.Value("string"),
@@ -162,45 +115,42 @@ class MegaWika(datasets.GeneratorBasedBuilder):
                  }
              ),
              supervised_keys=None,
-             homepage=_URL,
              citation=_CITATION,
          )
 
      def _split_generators(self, dl_manager):
-         if self.config.name == "all":
-             data_sources = _DATA_URL
          else:
-             data_sources = {self.config.name: _DATA_URL[self.config.name]}
 
          return [
              datasets.SplitGenerator(
-                 name=lang,
                  gen_kwargs={
                      "filepaths": dl_manager.download(data_sources[lang])
                  }
              )
-             for lang
-             in data_sources
          ]
 
      def _get_qa_pair_list_features(self, qa_pair, feature_name):
-         res = []
-
-         if feature_name in qa_pair:
-             if qa_pair[feature_name]:
-                 return qa_pair[feature_name]
-             else:
-                 if feature_name.startswith('en'):
-                     feature_name = '_'.join(feature_name.split('_')[1:])
-                     return self._get_qa_pair_list_features(qa_pair, feature_name)
 
-         return res
-
      def _generate_examples(self, filepaths):
-         """This function returns the examples in the raw (text) form by iterating on all the files."""
          id_ = 0
          for filepath in filepaths:
-             # logger.info("Generating examples from = %s", filepath)
              try:
                  with open(filepath, "r", encoding="utf-8") as f:
                      for line in f:
@@ -216,19 +166,11 @@ class MegaWika(datasets.GeneratorBasedBuilder):
                                      "passage": {
                                          "text": entry['passage'].get("text", []),
                                          "parse": json.dumps(entry['passage'].get("parse", [{}])),
-                                         "en_tokens": list(entry['passage'].get(
-                                             "en_tokens",
-                                             {
-                                                 token: token
-                                                 for tokens in entry['passage'].get("tokens", {})
-                                                 for token in tokens
-                                             }
-                                         ).values()),
                                          "lang_tokens": list(entry['passage'].get("lang_tokens", {}).values()),
                                          "en_lang_token_map": [
                                              (int(item[0]), int(item[1]))
-                                             for item
-                                             in entry['passage'].get("en_lang_token_map", {}).items()
                                          ]
                                      },
                                      "mt": {
@@ -237,7 +179,7 @@ class MegaWika(datasets.GeneratorBasedBuilder):
                                          "translation": entry.get("translation", ""),
                                          "translation_sents": entry.get("translation_sents", []),
                                          "translation_probs": entry.get("translation_probs", [[]]),
-                                         "repetitious_translation": entry.get("repetitious_translation", None)
                                      },
                                      "source_lang": entry.get("source_lang", ""),
                                      "source_url": entry.get("source_url", ""),
@@ -248,34 +190,20 @@ class MegaWika(datasets.GeneratorBasedBuilder):
                                              "en_answer": qa_pair.get('en_answer', qa_pair.get('answer', "")),
                                              'lang_answer': qa_pair.get('lang_answer', ''),
                                              'frames': qa_pair.get('frames', []),
-                                             "en_matches_in_source": self._get_qa_pair_list_features(qa_pair, "en_matches_in_source"),
-                                             "en_match_in_passage": self._get_qa_pair_list_features(qa_pair, "en_match_in_passage"),
-                                             "lang_matches_in_source": self._get_qa_pair_list_features(qa_pair, "lang_matches_in_source"),
-                                             "lang_match_in_passage": self._get_qa_pair_list_features(qa_pair, "lang_match_in_passage"),
                                              "passage": qa_pair.get('passage', []),
                                              "en_answer_tokens": qa_pair.get('en_answer_tokens', qa_pair.get('answer_tokens', [])),
                                              "match_disambiguated_question": qa_pair.get('match_disambiguated_question', ""),
                                          }
-                                         for qa_pair
-                                         in entry.get('qa_pairs', [])
                                      ]
                                  }
-                                 for entry
-                                 in example.get("entries", [])
                              ]
                          }
                          id_ += 1
-             except:
-                 print("Error reading file:", filepath)
-
-
-
-             # "entries": datasets.features.Sequence(
-             #     {
-             #         "qa_pairs": datasets.Sequence(
-             #             {
-             #                 "question": datasets.Value("string"),
-             #                 "answer": datasets.Value("string"),
-             #             }
-             #         )
-             #     }
 
+ import datasets
  import json
  import yaml
+ import urllib.request
 
+ _DESCRIPTION = """\
+ MegaWika is a multi- and crosslingual text dataset containing 30 million
+ Wikipedia passages with their scraped and cleaned web citations. The
+ passages span 50 Wikipedias in 50 languages, and the articles in which
+ the passages were originally embedded are included for convenience."""
 
  _CITATION = """\
  @article{barham2023megawika,
      title={MegaWika: Millions of reports and their sources across 50 diverse languages},
+     author={Barham, Samuel and Weller, Orion and others},
      journal={INSERT ARXIV PREPRINT ID HERE},
      year={2023}
+ }"""
 
  _HOMEPAGE = "https://huggingface.co/datasets/conceptofmind/MegaWika"
  _LICENSE = "cc-by-sa-4.0"
 
+ # Load the file paths for all the splits
  file_list_url = "https://huggingface.co/datasets/conceptofmind/MegaWika/raw/main/files.yml"
 
+ def get_data_urls():
+     with urllib.request.urlopen(file_list_url) as f:
+         try:
+             fnames = yaml.safe_load(f)
+             return fnames['fnames']
+         except yaml.YAMLError as exc:
+             print("Error loading the file paths for the dataset splits. Aborting.")
+             return {}
+
+ class MegaWikaConfig(datasets.BuilderConfig):
+     """BuilderConfig for MegaWika."""
+
+     def __init__(self, language=None, **kwargs):
+         """BuilderConfig for MegaWika.
+
+         Args:
+             language: The language of the dataset split
+             **kwargs: Keyword arguments forwarded to super.
+         """
+         super(MegaWikaConfig, self).__init__(**kwargs)
+         self.language = language
 
  class MegaWika(datasets.GeneratorBasedBuilder):
+     """MegaWika dataset."""
+
+     # Get available languages from the data URLs
+     _DATA_URL = get_data_urls()
+     BUILDER_CONFIGS = [
+         MegaWikaConfig(
+             name=lang if lang != "all" else "default",
+             language=lang,
+             version=datasets.Version("1.0.0"),
+             description=f"MegaWika {lang} configuration"
+         )
+         for lang in ["all"] + list(_DATA_URL.keys())
+     ]
+
+     DEFAULT_CONFIG_NAME = "default"  # For the "all" configuration
 
      def _info(self):
          return datasets.DatasetInfo(
 
                  "entries": datasets.features.Sequence(
                      {
                          "id": datasets.Value("string"),
                          "passage": {
                              "text": [datasets.Value("string")],
                              "parse": datasets.Value("string"),
                              "en_tokens": [datasets.Value("string")],
                              "lang_tokens": [datasets.Value("string")],
+                             "en_lang_token_map": [[datasets.Value("int32")]]
                          },
                          "mt": {
                              "original": datasets.Value("string"),
                              "original_sents": [datasets.Value("string")],
 
                              "translation_probs": [[datasets.Value("string")]],
                              "repetitious_translation": datasets.Value("bool")
                          },
                          "source_lang": datasets.Value("string"),
                          "source_url": datasets.Value("string"),
                          "source_text": datasets.Value("string"),
                          "qa_pairs": datasets.Sequence(
                              {
                                  "question": datasets.Value("string"),
 
                                          "argument": datasets.Value("string")
                                      }
                                  ),
+                                 "en_matches_in_source": [[datasets.Value("int32")]],
+                                 "en_match_in_passage": [datasets.Value("int32")],
+                                 "lang_matches_in_source": [[datasets.Value("int32")]],
+                                 "lang_match_in_passage": [datasets.Value("int32")],
                                  "passage": [datasets.Value("string")],
                                  "en_answer_tokens": [datasets.Value("string")],
                                  "match_disambiguated_question": datasets.Value("string"),
 
                  }
              ),
              supervised_keys=None,
+             homepage=_HOMEPAGE,
              citation=_CITATION,
+             license=_LICENSE
          )
 
      def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         if self.config.language == "all":
+             data_sources = self._DATA_URL
          else:
+             data_sources = {self.config.language: self._DATA_URL[self.config.language]}
 
          return [
              datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,  # Using TRAIN as default split
                  gen_kwargs={
                      "filepaths": dl_manager.download(data_sources[lang])
                  }
              )
+             for lang in data_sources
          ]
 
      def _get_qa_pair_list_features(self, qa_pair, feature_name):
+         """Helper method to extract QA pair features."""
+         if feature_name in qa_pair and qa_pair[feature_name]:
+             return qa_pair[feature_name]
+         elif feature_name.startswith('en'):
+             base_feature = '_'.join(feature_name.split('_')[1:])
+             if base_feature in qa_pair and qa_pair[base_feature]:
+                 return qa_pair[base_feature]
+         return []
 
      def _generate_examples(self, filepaths):
+         """Yields examples."""
          id_ = 0
          for filepath in filepaths:
              try:
                  with open(filepath, "r", encoding="utf-8") as f:
                      for line in f:
 
                                      "passage": {
                                          "text": entry['passage'].get("text", []),
                                          "parse": json.dumps(entry['passage'].get("parse", [{}])),
+                                         "en_tokens": list(entry['passage'].get("en_tokens", {}).values()),
                                          "lang_tokens": list(entry['passage'].get("lang_tokens", {}).values()),
                                          "en_lang_token_map": [
                                              (int(item[0]), int(item[1]))
+                                             for item in entry['passage'].get("en_lang_token_map", {}).items()
                                          ]
                                      },
                                      "mt": {
 
                                          "translation": entry.get("translation", ""),
                                          "translation_sents": entry.get("translation_sents", []),
                                          "translation_probs": entry.get("translation_probs", [[]]),
+                                         "repetitious_translation": entry.get("repetitious_translation", False)
                                      },
                                      "source_lang": entry.get("source_lang", ""),
                                      "source_url": entry.get("source_url", ""),
 
                                              "en_answer": qa_pair.get('en_answer', qa_pair.get('answer', "")),
                                              'lang_answer': qa_pair.get('lang_answer', ''),
                                              'frames': qa_pair.get('frames', []),
+                                             "en_matches_in_source": self._get_qa_pair_list_features(qa_pair, "en_matches_in_source"),
+                                             "en_match_in_passage": self._get_qa_pair_list_features(qa_pair, "en_match_in_passage"),
+                                             "lang_matches_in_source": self._get_qa_pair_list_features(qa_pair, "lang_matches_in_source"),
+                                             "lang_match_in_passage": self._get_qa_pair_list_features(qa_pair, "lang_match_in_passage"),
                                              "passage": qa_pair.get('passage', []),
                                              "en_answer_tokens": qa_pair.get('en_answer_tokens', qa_pair.get('answer_tokens', [])),
                                              "match_disambiguated_question": qa_pair.get('match_disambiguated_question', ""),
                                          }
+                                         for qa_pair in entry.get('qa_pairs', [])
                                      ]
                                  }
+                                 for entry in example.get("entries", [])
                              ]
                          }
                          id_ += 1
+             except Exception as e:
+                 print(f"Error reading file {filepath}: {str(e)}")