|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
|
|
import datasets |
|
|
|
|
|
_CITATION = """\ |
|
@InProceedings{mfaq_a_multilingual_dataset, |
|
title={MFAQ: a Multilingual FAQ Dataset}, |
|
author={Maxime {De Bruyn} and Ehsan Lotfi and Jeska Buhmann and Walter Daelemans}, |
|
year={2021}, |
|
booktitle={MRQA @ EMNLP 2021} |
|
} |
|
""" |
|
|
|
|
|
_DESCRIPTION = """\ |
|
We present the first multilingual FAQ dataset publicly available. We collected around 6M FAQ pairs from the web, in 21 different languages. |
|
""" |
|
|
|
_HOMEPAGE = "" |
|
|
|
_LICENSE = "" |
|
|
|
|
|
_LANGUAGES = ["cs", "da", "de", "en", "es", "fi", "fr", "he", "hr", "hu", "id", "it", "nl", "no", "pl", "pt", "ro", "ru", "sv", "tr", "vi"] |
|
_URLs = {} |
|
_URLs.update({f"{l}": {"train": [f"data/{l}/train.jsonl"], "valid": [f"data/{l}/valid.jsonl"]} for l in _LANGUAGES}) |
|
_URLs["all"] = {"train": [f"data/{l}/train.jsonl" for l in _LANGUAGES], "valid": [f"data/{l}/valid.jsonl" for l in _LANGUAGES]} |
|
_URLs.update({f"{l}_flat": {"train": [f"data/{l}/train.jsonl"], "valid": [f"data/{l}/valid.jsonl"]} for l in _LANGUAGES}) |
|
_URLs["all_flat"] = {"train": [f"data/{l}/train.jsonl" for l in _LANGUAGES], "valid": [f"data/{l}/valid.jsonl" for l in _LANGUAGES]} |
|
|
|
|
|
class MFAQ(datasets.GeneratorBasedBuilder):
    """Builder for the MFAQ multilingual FAQ dataset.

    One config per language code yields nested examples (one FAQ page with
    its list of ``qa_pairs``); the matching ``<lang>_flat`` config yields
    one example per question/answer pair.  ``all`` / ``all_flat`` cover
    every language at once.
    """

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=name, version=datasets.Version("1.1.0"))
        for name in _URLs
    ]
    DEFAULT_CONFIG_NAME = "all"

    def _is_flat(self):
        """True when the active config yields one example per QA pair.

        Config names are ``<lang>``, ``<lang>_flat``, ``all`` or
        ``all_flat``, so testing the suffix matches exactly the same
        configs as the original substring checks (``"_flat" in`` /
        ``"flat" in``), which were inconsistently spelled.
        """
        return self.config.name.endswith("_flat")

    def _info(self):
        """Return the DatasetInfo for the active config.

        Flat configs expose ``question``/``answer`` columns directly;
        nested configs expose a ``qa_pairs`` list instead.
        """
        # Fields shared by both schemas, in the original declaration order.
        common = {
            "id": datasets.Value("int64"),
            "language": datasets.Value("string"),
            "num_pairs": datasets.Value("int64"),
            "domain": datasets.Value("string"),
        }
        if self._is_flat():
            features = datasets.Features(
                {
                    **common,
                    "question": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                }
            )
        else:
            features = datasets.Features(
                {
                    **common,
                    "qa_pairs": [
                        {
                            "question": datasets.Value("string"),
                            "answer": datasets.Value("string"),
                            "language": datasets.Value("string"),
                        }
                    ],
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train and validation jsonl files."""
        my_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(my_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": data_dir["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepaths": data_dir["valid"], "split": "valid"},
            ),
        ]

    def _generate_examples(self, filepaths, split):
        """Yields examples as (key, example) tuples.

        Each input file is jsonl: one JSON object per line with keys
        ``id``, ``domain``, ``language``, ``num_pairs`` and ``qa_pairs``.
        Keys are built from the file path and line number so they are
        unique across the files of an "all" config.
        """
        flat = self._is_flat()  # loop invariant: config does not change per row
        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                for _id, row in enumerate(f):
                    data = json.loads(row)
                    if flat:
                        # One example per QA pair; num_pairs is 1 by construction.
                        for i, pair in enumerate(data["qa_pairs"]):
                            yield f"{filepath}_{_id}_{i}", {
                                "id": data["id"],
                                "domain": data["domain"],
                                "language": data["language"],
                                "num_pairs": 1,
                                "question": pair["question"],
                                "answer": pair["answer"],
                            }
                    else:
                        yield f"{filepath}_{_id}", {
                            "id": data["id"],
                            "domain": data["domain"],
                            "language": data["language"],
                            "num_pairs": data["num_pairs"],
                            "qa_pairs": data["qa_pairs"],
                        }
|
|