"""Indo-Aryan Language Identification Shared Task Dataset""" |
|
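# A minimal usage sketch (an assumption: this script is exposed to
# `datasets.load_dataset`, e.g. via a local path or the hub name "ilist"):
#
#     from datasets import load_dataset
#     dataset = load_dataset("ilist")
#     print(dataset["train"][0])   # {"language_id": ..., "text": "..."}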

import datasets
from datasets.tasks import TextClassification


_CITATION = r"""\
@inproceedings{zampieri-etal-2018-language,
    title = "Language Identification and Morphosyntactic Tagging: The Second {V}ar{D}ial Evaluation Campaign",
    author = {Zampieri, Marcos and
      Malmasi, Shervin and
      Nakov, Preslav and
      Ali, Ahmed and
      Shon, Suwon and
      Glass, James and
      Scherrer, Yves and
      Samard{\v{z}}i{\'c}, Tanja and
      Ljube{\v{s}}i{\'c}, Nikola and
      Tiedemann, J{\"o}rg and
      van der Lee, Chris and
      Grondelaers, Stefan and
      Oostdijk, Nelleke and
      Speelman, Dirk and
      van den Bosch, Antal and
      Kumar, Ritesh and
      Lahiri, Bornini and
      Jain, Mayank},
    booktitle = "Proceedings of the Fifth Workshop on {NLP} for Similar Languages, Varieties and Dialects ({V}ar{D}ial 2018)",
    month = aug,
    year = "2018",
    address = "Santa Fe, New Mexico, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W18-3901",
    pages = "1--17",
}
"""

_DESCRIPTION = """\
This dataset was introduced in a shared task aimed at identifying five closely related
languages of the Indo-Aryan language family: Hindi (also known as Khari Boli),
Braj Bhasha, Awadhi, Bhojpuri, and Magahi.
"""

_URL = "https://raw.githubusercontent.com/kmi-linguistics/vardial2018/master/dataset/{}.txt"
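# The upstream repository provides three tab-separated files, train.txt, dev.txt,
# and gold.txt (the labelled held-out data, used here as the test split); each
# non-empty line holds one sentence and its three-letter language label.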


class Ilist(datasets.GeneratorBasedBuilder):
    """Dataset builder for the VarDial 2018 Indo-Aryan Language Identification shared task."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # Three-letter codes: Awadhi, Braj Bhasha, Magahi, Bhojpuri, Hindi.
                    "language_id": datasets.ClassLabel(names=["AWA", "BRA", "MAG", "BHO", "HIN"]),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/kmi-linguistics/vardial2018",
            citation=_CITATION,
            task_templates=[TextClassification(text_column="text", label_column="language_id")],
        )

    def _split_generators(self, dl_manager):
        filepaths = dl_manager.download_and_extract(
            {
                "train": _URL.format("train"),
                "test": _URL.format("gold"),
                "dev": _URL.format("dev"),
            }
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": filepaths["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": filepaths["test"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": filepaths["dev"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields (idx, example) pairs from a file of "text<TAB>label" lines."""
        with open(filepath, "r", encoding="utf-8") as file:
            for idx, row in enumerate(file):
                row = row.strip("\n").split("\t")
                # Skip lines without a tab separator (e.g. blank lines).
                if len(row) == 1:
                    continue
                yield idx, {"language_id": row[1], "text": row[0]}