import json
import os

import datasets

# TODO: fill in the real OpenSLU citation and description below.
_OPEN_SLU_CITATION = """\
xxx"""

_OPEN_SLU_DESCRIPTION = """\
xxx"""

_ATIS_CITATION = """\
@inproceedings{hemphill1990atis,
    title = "The {ATIS} Spoken Language Systems Pilot Corpus",
    author = "Hemphill, Charles T. and
      Godfrey, John J. and
      Doddington, George R.",
    booktitle = "Speech and Natural Language: Proceedings of a Workshop Held at Hidden Valley, {P}ennsylvania, June 24-27,1990",
    year = "1990",
    url = "https://aclanthology.org/H90-1021",
}
"""

_ATIS_DESCRIPTION = """\
A widely used corpus for single-intent spoken language understanding (SLU).
"""


class OpenSLUConfig(datasets.BuilderConfig):
    """BuilderConfig for OpenSLU."""

    def __init__(self, features, data_url, citation, url, intent_label_classes=None, slot_label_classes=None, **kwargs):
        """BuilderConfig for OpenSLU.

        Args:
          features: `list[string]`, list of the features that will appear in the
            feature dict. Should not include "label".
          data_url: `string`, url to download the data archive from.
          citation: `string`, citation for the data set.
          url: `string`, url for information about the data set.
          intent_label_classes: `list[string]`, the list of classes for the intent label.
          slot_label_classes: `list[string]`, the list of classes for the slot label.
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("0.0.1"), **kwargs)
        self.features = features
        self.intent_label_classes = intent_label_classes
        self.slot_label_classes = slot_label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url


class OpenSLU(datasets.GeneratorBasedBuilder):
    """The OpenSLU benchmark."""

    BUILDER_CONFIGS = [
        OpenSLUConfig(
            name="products",
            description=_ATIS_DESCRIPTION,
            features=["text"],
            data_url="https://huggingface.co/datasets/rams901/OpenSLU_Clone/resolve/main/prods.tar.gz",
            citation=_ATIS_CITATION,
            url="https://aclanthology.org/H90-1021",
        ),
    ]

    def _info(self):
        # Every configured feature is a sequence of string tokens; each
        # example additionally carries per-token slot tags and a single intent.
        features = {feature: datasets.Sequence(datasets.Value("string")) for feature in self.config.features}
        features["slot"] = datasets.Sequence(datasets.Value("string"))
        features["intent"] = datasets.Value("string")

        return datasets.DatasetInfo(
            description=_OPEN_SLU_DESCRIPTION + self.config.description,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + _OPEN_SLU_CITATION,
        )
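
    # Resulting schema for the "products" config, as assembled in _info above:
    #   text:   Sequence(Value("string"))  -- utterance tokens
    #   slot:   Sequence(Value("string"))  -- per-token slot tags
    #   intent: Value("string")            -- single intent label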

    def _split_generators(self, dl_manager):
        dl_dir = dl_manager.download_and_extract(self.config.data_url) or ""

        # The archive is expected to extract into a directory named after the
        # file stem of data_url (e.g. prods.tar.gz -> prods/), holding
        # train.jsonl, dev.jsonl and test.jsonl.
        task_name = _get_task_name_from_data_url(self.config.data_url)
        dl_dir = os.path.join(dl_dir, task_name)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "train.jsonl"),
                    "split": datasets.Split.TRAIN,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "dev.jsonl"),
                    "split": datasets.Split.VALIDATION,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "test.jsonl"),
                    "split": datasets.Split.TEST,
                },
            ),
        ]

    def _generate_examples(self, data_file, split):
        # One JSON object per line; the line index doubles as the example id.
        with open(data_file, encoding="utf-8") as f:
            for index, line in enumerate(f):
                row = json.loads(line)
                yield index, row
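

# A minimal sketch of the JSONL row layout this builder assumes, matching the
# schema declared in _info (all token, tag and intent values below are
# hypothetical):
#
#   {"text": ["show", "me", "red", "sneakers"],
#    "slot": ["O", "O", "B-color", "B-product"],
#    "intent": "search_product"}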


# The helpers below are retained from the SuperGLUE-style template this script
# is based on; they are not called by the OpenSLU builder itself.
def _cast_label(label):
    """Converts the label into the appropriate string version."""
    if isinstance(label, str):
        return label
    elif isinstance(label, bool):
        return "True" if label else "False"
    elif isinstance(label, int):
        assert label in (0, 1)
        return str(label)
    else:
        raise ValueError("Invalid label format.")


def _get_record_entities(passage):
    """Returns the unique entity texts together with their sorted spans."""
    text = passage["text"]
    entity_spans = list()
    for entity in passage["entities"]:
        entity_text = text[entity["start"]: entity["end"] + 1]
        entity_spans.append({"text": entity_text, "start": entity["start"], "end": entity["end"] + 1})
    entity_spans = sorted(entity_spans, key=lambda e: e["start"])  # sort by start index
    entity_texts = set(e["text"] for e in entity_spans)
    return entity_texts, entity_spans


def _get_record_answers(qa):
    """Returns the sorted list of unique answers."""
    if "answers" not in qa:
        return []
    answers = set()
    for answer in qa["answers"]:
        answers.add(answer["text"])
    return sorted(answers)


def _get_task_name_from_data_url(data_url):
    # e.g. ".../prods.tar.gz" -> "prods"
    return data_url.split("/")[-1].split(".")[0]
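

if __name__ == "__main__":
    # Minimal usage sketch, not part of the builder: load this script directly.
    # Assumes network access to data_url and a `datasets` version that still
    # supports script-based dataset loading (datasets < 3.0).
    ds = datasets.load_dataset(__file__, "products")
    print(ds)
    print(ds["train"][0])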