import datasets
import json
import yaml
import urllib.request

_DESCRIPTION = """\ |
|
MegaWika is a multi- and crosslingual text dataset containing 30 million |
|
Wikipedia passages with their scraped and cleaned web citations. The |
|
passages span 50 Wikipedias in 50 languages, and the articles in which |
|
the passages were originally embedded are included for convenience.""" |
|
|
|
_CITATION = """\ |
|
@article{barham2023megawika, |
|
title={MegaWika: Millions of reports and their sources across 50 diverse languages}, |
|
author={Barham, Samuel and Weller, Orion and others}, |
|
journal={INSERT ARXIV PREPRINT ID HERE}, |
|
year={2023} |
|
}""" |
|
|
|
_HOMEPAGE = "https://huggingface.co/datasets/conceptofmind/MegaWika" |
|
_LICENSE = "cc-by-sa-4.0" |
|
|
|
|
|
file_list_url = "https://huggingface.co/datasets/conceptofmind/MegaWika/raw/main/files.yml" |
|
|
|
def get_data_urls():
    """Fetch the per-language lists of data file URLs from the hosted files.yml."""
    with urllib.request.urlopen(file_list_url) as f:
        try:
            fnames = yaml.safe_load(f)
            return fnames['fnames']
        except yaml.YAMLError as exc:
            print(f"Error loading the file paths for the dataset splits: {exc}. Aborting.")
            return {}

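# Expected shape of files.yml, inferred from the parsing code above (not
# verified against the hosted file): a top-level "fnames" mapping from
# language code to a list of shard URLs, e.g.
#
#   fnames:
#     en:
#       - https://.../en-00000-of-xxxxx.jsonl
#     de:
#       - https://.../de-00000-of-xxxxx.jsonl
#
# The keys and URLs shown here are illustrative placeholders only.
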
class MegaWikaConfig(datasets.BuilderConfig):
    """BuilderConfig for MegaWika."""

    def __init__(self, language=None, **kwargs):
        """BuilderConfig for MegaWika.

        Args:
            language: The language of the dataset split
            **kwargs: Keyword arguments forwarded to super.
        """
        super(MegaWikaConfig, self).__init__(**kwargs)
        self.language = language

class MegaWika(datasets.GeneratorBasedBuilder):
    """MegaWika dataset."""

    # The language -> file-URL mapping is fetched once, when this script is
    # loaded, so that one BuilderConfig can be created per available language.
    _DATA_URL = get_data_urls()

    BUILDER_CONFIGS = [
        MegaWikaConfig(
            name=lang if lang != "all" else "default",
            language=lang,
            version=datasets.Version("1.0.0"),
            description=f"MegaWika {lang} configuration"
        )
        for lang in ["all"] + list(_DATA_URL.keys())
    ]

    DEFAULT_CONFIG_NAME = "default"

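    # Resulting config names, assuming the keys of files.yml are language codes
    # (the concrete codes come from the hosted file and are not hard-coded here):
    #   "default"              -> all languages combined
    #   "en", "de", "ru", ...  -> one configuration per language key
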
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "article_title": datasets.Value("string"),
                    "article_text": datasets.Value("string"),
                    "entries": datasets.features.Sequence(
                        {
                            "id": datasets.Value("string"),
                            "passage": {
                                "text": [datasets.Value("string")],
                                "parse": datasets.Value("string"),
                                "en_tokens": [datasets.Value("string")],
                                "lang_tokens": [datasets.Value("string")],
                                "en_lang_token_map": [[datasets.Value("int32")]]
                            },
                            "mt": {
                                "original": datasets.Value("string"),
                                "original_sents": [datasets.Value("string")],
                                "translation": datasets.Value("string"),
                                "translation_sents": [datasets.Value("string")],
                                "translation_probs": [[datasets.Value("string")]],
                                "repetitious_translation": datasets.Value("bool")
                            },
                            "source_lang": datasets.Value("string"),
                            "source_url": datasets.Value("string"),
                            "source_text": datasets.Value("string"),
                            "qa_pairs": datasets.Sequence(
                                {
                                    "question": datasets.Value("string"),
                                    "en_answer": datasets.Value("string"),
                                    "lang_answer": datasets.Value("string"),
                                    "frames": datasets.Sequence(
                                        {
                                            "frame": datasets.Value("string"),
                                            "argument": datasets.Value("string")
                                        }
                                    ),
                                    "en_matches_in_source": [[datasets.Value("int32")]],
                                    "en_match_in_passage": [datasets.Value("int32")],
                                    "lang_matches_in_source": [[datasets.Value("int32")]],
                                    "lang_match_in_passage": [datasets.Value("int32")],
                                    "passage": [datasets.Value("string")],
                                    "en_answer_tokens": [datasets.Value("string")],
                                    "match_disambiguated_question": datasets.Value("string"),
                                }
                            )
                        }
                    )
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE
        )

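    # Sketch of a single yielded example under the schema above, for orientation
    # only; the field values are illustrative placeholders, not taken from data:
    #
    #   {
    #       "article_title": "...",
    #       "article_text": "...",
    #       "entries": [
    #           {
    #               "id": "...",
    #               "passage": {"text": [...], "parse": "...", "en_tokens": [...],
    #                           "lang_tokens": [...], "en_lang_token_map": [[0, 0], ...]},
    #               "mt": {"original": "...", "original_sents": [...], "translation": "...",
    #                      "translation_sents": [...], "translation_probs": [[...]],
    #                      "repetitious_translation": False},
    #               "source_lang": "en",
    #               "source_url": "https://example.org/source",
    #               "source_text": "...",
    #               "qa_pairs": [{"question": "...", "en_answer": "...", ...}]
    #           }
    #       ]
    #   }
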
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        if self.config.language == "all":
            data_sources = self._DATA_URL
        else:
            data_sources = {self.config.language: self._DATA_URL[self.config.language]}

        # Download every requested language into a single TRAIN split so that
        # split names stay unique even when the "all" configuration is used.
        filepaths = [
            url
            for lang in data_sources
            for url in data_sources[lang]
        ]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": dl_manager.download(filepaths)
                }
            )
        ]

    def _get_qa_pair_list_features(self, qa_pair, feature_name):
        """Return a list-valued QA-pair feature, falling back to the un-prefixed key.

        If `feature_name` (e.g. "en_matches_in_source") is missing or empty and
        starts with "en", the value stored under the name without the "en_"
        prefix (e.g. "matches_in_source") is returned instead.
        """
        if feature_name in qa_pair and qa_pair[feature_name]:
            return qa_pair[feature_name]
        elif feature_name.startswith('en'):
            base_feature = '_'.join(feature_name.split('_')[1:])
            if base_feature in qa_pair and qa_pair[base_feature]:
                return qa_pair[base_feature]
        return []

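    # For example (hypothetical data): with qa_pair = {"matches_in_source": [[0, 4]]},
    # _get_qa_pair_list_features(qa_pair, "en_matches_in_source") returns [[0, 4]],
    # while _get_qa_pair_list_features(qa_pair, "lang_matches_in_source") returns [].
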
    def _generate_examples(self, filepaths):
        """Yields examples."""
        id_ = 0
        for filepath in filepaths:
            try:
                with open(filepath, "r", encoding="utf-8") as f:
                    for line in f:
                        # Skip blank lines; json.loads would otherwise raise and
                        # abort the rest of the file via the except clause below.
                        line = line.strip()
                        if line:
                            example = json.loads(line)
                            if example is not None and isinstance(example, dict):
                                yield id_, {
                                    "article_title": example.get("article_title", ""),
                                    "article_text": example.get("article_text", ""),
                                    "entries": [
                                        {
                                            "id": entry.get("id", "").lower(),
                                            "passage": {
                                                "text": entry['passage'].get("text", []),
                                                "parse": json.dumps(entry['passage'].get("parse", [{}])),
                                                "en_tokens": list(entry['passage'].get("en_tokens", {}).values()),
                                                "lang_tokens": list(entry['passage'].get("lang_tokens", {}).values()),
                                                "en_lang_token_map": [
                                                    (int(item[0]), int(item[1]))
                                                    for item in entry['passage'].get("en_lang_token_map", {}).items()
                                                ]
                                            },
                                            "mt": {
                                                "original": entry.get("original", ""),
                                                "original_sents": entry.get("original_sents", []),
                                                "translation": entry.get("translation", ""),
                                                "translation_sents": entry.get("translation_sents", []),
                                                "translation_probs": entry.get("translation_probs", [[]]),
                                                "repetitious_translation": entry.get("repetitious_translation", False)
                                            },
                                            "source_lang": entry.get("source_lang", ""),
                                            "source_url": entry.get("source_url", ""),
                                            "source_text": entry.get("source_text", ""),
                                            "qa_pairs": [
                                                {
                                                    "question": qa_pair.get('question', ""),
                                                    "en_answer": qa_pair.get('en_answer', qa_pair.get('answer', "")),
                                                    "lang_answer": qa_pair.get('lang_answer', ''),
                                                    "frames": qa_pair.get('frames', []),
                                                    "en_matches_in_source": self._get_qa_pair_list_features(qa_pair, "en_matches_in_source"),
                                                    "en_match_in_passage": self._get_qa_pair_list_features(qa_pair, "en_match_in_passage"),
                                                    "lang_matches_in_source": self._get_qa_pair_list_features(qa_pair, "lang_matches_in_source"),
                                                    "lang_match_in_passage": self._get_qa_pair_list_features(qa_pair, "lang_match_in_passage"),
                                                    "passage": qa_pair.get('passage', []),
                                                    "en_answer_tokens": qa_pair.get('en_answer_tokens', qa_pair.get('answer_tokens', [])),
                                                    "match_disambiguated_question": qa_pair.get('match_disambiguated_question', ""),
                                                }
                                                for qa_pair in entry.get('qa_pairs', [])
                                            ]
                                        }
                                        for entry in example.get("entries", [])
                                    ]
                                }
                                id_ += 1
            except Exception as e:
                print(f"Error reading file {filepath}: {str(e)}")