|
"""NLUCat dataset.""" |
|
|
|
|
|
import json |
|
|
|
import datasets |
|
|
|
|
|
_HOMEPAGE = "" |
|
|
|
_CITATION = """\ |
|
|
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
NLUCat - Natural Language Understanding in Catalan |
|
""" |
|
|
|
_TRAIN_FILE = "train.json" |
|
_DEV_FILE = "dev.json" |
|
_TEST_FILE = "test.json" |
|
|
|
class Nlucat(datasets.GeneratorBasedBuilder):
    """NLUCat: Natural Language Understanding in Catalan.

    Loads intent and slot annotations from per-split JSON files
    (train/dev/test) and exposes them with a flat feature schema.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return dataset metadata and the feature schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "example": datasets.Value("string"),
                    "intent": datasets.Value("string"),
                    "slot_text": datasets.Sequence(datasets.Value("string")),
                    "slot_tag": datasets.Sequence(datasets.Value("string")),
                    # NOTE(review): offsets are declared as strings; the JSON
                    # presumably stores Start_char/End_char as ints — confirm
                    # whether Value("int32") would be the accurate type before
                    # changing, since it alters the public schema.
                    "start_char": datasets.Sequence(datasets.Value("string")),
                    "end_char": datasets.Sequence(datasets.Value("string")),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train/dev/test JSON files."""
        # The constants are already plain strings; the previous f-string
        # wrappers (f"{_TRAIN_FILE}") were redundant.
        urls_to_download = {
            "train": _TRAIN_FILE,
            "dev": _DEV_FILE,
            "test": _TEST_FILE,
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"], "split": "train"}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"], "split": "validation"}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"], "split": "test"}),
        ]

    def _generate_examples(self, filepath, split):
        """Yields (id, example_dict) pairs from one split's JSON file.

        Args:
            filepath: Path to the downloaded JSON file for this split.
            split: Split name ("train"/"validation"/"test"); unused here but
                kept because _split_generators passes it via gen_kwargs.
        """
        with open(filepath, encoding="utf-8") as f:
            dataset = json.load(f)
        for row in dataset["data"]:
            annotation = row["annotation"]
            # Hoist the slot list once instead of re-reading it per field.
            slots = annotation["slots"]
            yield row["id"], {
                "example": row["example"],
                "intent": annotation["intent"],
                "slot_text": [slot["Text"] for slot in slots],
                "slot_tag": [slot["Tag"] for slot in slots],
                "start_char": [slot["Start_char"] for slot in slots],
                "end_char": [slot["End_char"] for slot in slots],
            }
|
|