import csv

import datasets
from datasets.tasks import Summarization

logger = datasets.logging.get_logger(__name__)
_CITATION = """\ |
|
@inproceedings{hasan-etal-2021-xl, |
|
title = "{XL}-Sum: Large-Scale Multilingual Abstractive Summarization for 44 Languages", |
|
author = "Hasan, Tahmid and |
|
Bhattacharjee, Abhik and |
|
Islam, Md. Saiful and |
|
Mubasshir, Kazi and |
|
Li, Yuan-Fang and |
|
Kang, Yong-Bin and |
|
Rahman, M. Sohel and |
|
Shahriyar, Rifat", |
|
booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021", |
|
month = aug, |
|
year = "2021", |
|
address = "Online", |
|
publisher = "Association for Computational Linguistics", |
|
url = "https://aclanthology.org/2021.findings-acl.413", |
|
pages = "4693--4703", |
|
} |
|
""" |
|
|
|

_DESCRIPTION = """Persian portion of the XL-Sum dataset"""

_DOWNLOAD_URLS = {
    "train": "https://huggingface.co/datasets/hezarai/xlsum-fa/resolve/main/xlsum-fa_train.csv",
    "test": "https://huggingface.co/datasets/hezarai/xlsum-fa/resolve/main/xlsum-fa_test.csv",
}
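# Each CSV above is expected to contain a header row followed by two columns,
# text and summary in that order; `_generate_examples` below relies on this
# layout.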


class XLSumFaConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)


class XLSumFa(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        XLSumFaConfig(
            name="xlsum-fa",
            version=datasets.Version("1.0.0"),
            description=_DESCRIPTION,
        ),
    ]

    def _info(self):
        text_column = "text"
        summary_column = "summary"
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    text_column: datasets.Value("string"),
                    summary_column: datasets.Value("string"),
                }
            ),
            homepage="https://huggingface.co/datasets/hezarai/xlsum-fa",
            citation=_CITATION,
            task_templates=[Summarization(text_column=text_column, summary_column=summary_column)],
        )
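
    # NOTE: task templates (`datasets.tasks`) are deprecated in newer releases
    # of the `datasets` library; if you target those versions, drop the
    # `task_templates` argument in `_info` above.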

    def _split_generators(self, dl_manager):
        """
        Return SplitGenerators for the train and test splits.
        """
        # `download_and_extract` downloads each CSV (with local caching) and
        # returns the path to the downloaded file.
        train_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["train"])
        test_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["test"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
            ),
        ]

    def _generate_examples(self, filepath):
        """
        Read the CSV file at `filepath` and yield one `(id, example)` tuple
        per row, e.g.:
        ```
        (123, {"text": "...", "summary": "..."})
        ```
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file, quotechar='"', skipinitialspace=True)

            # Skip the header row.
            next(csv_reader, None)

            for id_, row in enumerate(csv_reader):
                text, summary = row
                yield id_, {"text": text, "summary": summary}
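

# A minimal usage sketch (not part of the loader itself), assuming a
# `datasets` version that still supports script-based loading:
if __name__ == "__main__":
    # `load_dataset` accepts the path to a local loading script.
    ds = datasets.load_dataset(__file__, split="train")
    print(ds[0])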
|