|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""mMARCO dataset.""" |
|
|
|
from collections import defaultdict |
|
from gc import collect |
|
import datasets |
|
|
|
|
|
# BibTeX entry for the mMARCO paper (arXiv:2108.13897), surfaced to users
# via ``DatasetInfo.citation``.
_CITATION = """

@misc{bonifacio2021mmarco,

title={mMARCO: A Multilingual Version of the MS MARCO Passage Ranking Dataset},

author={Luiz Henrique Bonifacio and Israel Campiotti and Vitor Jeronymo and Hugo Queiroz Abonizio and Roberto Lotufo and Rodrigo Nogueira},

year={2021},

eprint={2108.13897},

archivePrefix={arXiv},

primaryClass={cs.CL}

}

"""
|
|
|
# Project homepage, surfaced via ``DatasetInfo.homepage``.
_URL = "https://github.com/unicamp-dl/mMARCO"
|
|
|
# Short human-readable description; the per-config language description is
# appended to it in ``MMarcoDev._info``.
_DESCRIPTION = """

mMARCO translated datasets

"""
|
|
|
|
|
# Root URLs of the Google-translated mMARCO files hosted on the Hugging Face
# Hub. NOTE(review): only the "queries-dev" entry is used by this builder
# (see ``MMarcoDev._split_generators``); the other keys appear to be kept for
# parity with related loader scripts — confirm before removing.
_BASE_URLS = {

    "collections": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/google/collections/",

    "queries-train": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/google/queries/train/",

    "queries-dev": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/google/queries/dev/",

    "runs": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/google/runs/",

    "train": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/triples.train.ids.small.tsv",

}
|
|
|
# Languages with a translated dev-small query file available; every builder
# config name must be one of these (each maps to a ``<lang>_queries.dev.small.tsv``
# file under the "queries-dev" base URL).
LANGUAGES = [

    "arabic",

    "chinese",

    "dutch",

    "english",

    "french",

    "german",

    "hindi",

    "indonesian",

    "italian",

    "japanese",

    "portuguese",

    "russian",

    "spanish",

    "vietnamese",

]
|
|
|
|
|
class MMarcoDev(datasets.GeneratorBasedBuilder):
    """mMARCO dev-small queries for a single language.

    Each example is one dev query. The ``positive_passages`` and
    ``negative_passages`` lists are part of the schema for downstream
    compatibility but are always empty: this builder downloads and yields
    the translated queries only, not the passage collection.
    """

    # One config per supported translation (redundant wrapping parentheses
    # around the list removed).
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=language,
            description=f"{language.capitalize()} dev queries",
            version=datasets.Version("2.0.0"),
        )
        for language in LANGUAGES
    ]

    DEFAULT_CONFIG_NAME = "english"

    def _info(self):
        """Return dataset metadata (features, citation, homepage) for the
        selected language config.

        Raises:
            ValueError: if the config name is not a supported language.
        """
        name = self.config.name
        # Raise a real exception instead of ``assert`` so the check survives
        # ``python -O`` (asserts are stripped under optimization); also fixes
        # the "languge" typo in the message.
        if name not in LANGUAGES:
            raise ValueError(f"Does not support language {name}. Must be one of {LANGUAGES}.")

        features = {
            "query_id": datasets.Value("string"),
            "query": datasets.Value("string"),
            # Always empty in practice — see class docstring.
            "positive_passages": [
                {"docid": datasets.Value("string"), "text": datasets.Value("string")}
            ],
            "negative_passages": [
                {"docid": datasets.Value("string"), "text": datasets.Value("string")}
            ],
        }

        return datasets.DatasetInfo(
            description=f"{_DESCRIPTION}\n{self.config.description}",
            features=datasets.Features(features),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the dev-small query TSV(s) and return the single "dev" split."""
        # Defensive fallback: download every language if the config name is
        # somehow not a single supported language.
        languages = [self.config.name] if self.config.name in LANGUAGES else LANGUAGES
        urls = {
            "queries": {
                lang: _BASE_URLS["queries-dev"] + lang + "_queries.dev.small.tsv"
                for lang in languages
            },
        }
        dl_path = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name="dev",
                gen_kwargs={
                    "args": {
                        "queries": dl_path["queries"],
                    },
                },
            )
        ]

    def _generate_examples(self, args=None):
        """Yield ``(key, example)`` pairs from the downloaded query TSV.

        Args:
            args: dict with a ``"queries"`` mapping of language -> local TSV
                path, as produced by :meth:`_split_generators`.

        Raises:
            ValueError: if the config name is not a supported language.
        """
        lang = self.config.name
        if lang not in LANGUAGES:
            raise ValueError(f"Does not support language {lang}. Must be one of {LANGUAGES}.")

        queries_path = args["queries"][lang]
        with open(queries_path, encoding="utf-8") as f:
            for line in f:
                line = line.rstrip()
                if not line:
                    # Skip blank lines (e.g. a trailing newline at EOF)
                    # instead of crashing on the tuple unpack below.
                    continue
                # maxsplit=1 keeps the query intact even if it happens to
                # contain a tab character (the id is always the first field).
                query_id, query = line.split("\t", 1)
                yield f"{lang}-{query_id}", {
                    "query_id": query_id,
                    "query": query,
                    "positive_passages": [],
                    "negative_passages": [],
                }
|
|