""" |
|
The IDKMRC-NLI dataset is derived from the IDK-MRC \ |
|
question answering dataset, utilizing named \ |
|
entity recognition (NER), chunking tags, \ |
|
Regex, and embedding similarity techniques \ |
|
to determine its contradiction sets. \ |
|
Collected through this process, \ |
|
the dataset comprises various columns beyond \ |
|
premise, hypothesis, and label, including \ |
|
properties aligned with NER and chunking tags. \ |
|
This dataset is designed to facilitate Natural\ |
|
Language Inference (NLI) tasks and contains \ |
|
information extracted from diverse sources \ |
|
to provide comprehensive coverage. Each data \ |
|
instance encapsulates premise, hypothesis, label, \ |
|
and additional properties pertinent to NLI evaluation. |
|
""" |
|
import csv
from pathlib import Path
from typing import Dict, Iterator, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@article{,
  author    = {},
  title     = {},
  journal   = {},
  volume    = {},
  year      = {},
  url       = {},
  doi       = {},
  biburl    = {},
  bibsource = {}
}
"""

_DATASETNAME = "idk_mrc_nli"

_DESCRIPTION = """
The IDKMRC-NLI dataset is derived from the IDK-MRC \
question answering dataset, using named \
entity recognition (NER), chunking tags, \
regular expressions, and embedding similarity \
to construct its contradiction sets. \
The resulting dataset comprises several columns beyond \
premise, hypothesis, and label, including \
properties aligned with the NER and chunking tags. \
The dataset is designed to support Natural \
Language Inference (NLI) tasks and contains \
information extracted from diverse sources \
for broad coverage. Each data \
instance holds a premise, a hypothesis, a label, \
and additional properties relevant to NLI evaluation.
"""

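# The helper below is only an illustrative sketch of the idea described in _DESCRIPTION,
# not the dataset's actual construction pipeline: a contradiction hypothesis can be built
# by swapping an answer span for a different, type-compatible entity. In the real dataset
# that choice is guided by NER/chunking tags, regex, and embedding similarity; here the
# span and its replacement are simply supplied by the caller (hypothetical helper).
def _illustrative_contradiction(hypothesis: str, answer_span: str, replacement_entity: str) -> str:
    """Toy example: swap the answer span to turn an entailed hypothesis into a contradiction."""
    return hypothesis.replace(answer_span, replacement_entity)
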
_HOMEPAGE = "https://huggingface.co/datasets/muhammadravi251001/idkmrc-nli"

_LANGUAGES = ["ind"]

_LICENSE = Licenses.UNKNOWN.value

_LOCAL = False

_URLS = {
    "train": "https://huggingface.co/datasets/muhammadravi251001/idkmrc-nli/resolve/main/idk-mrc_nli_train_df.csv?download=true",
    "val": "https://huggingface.co/datasets/muhammadravi251001/idkmrc-nli/raw/main/idk-mrc_nli_val_df.csv",
    "test": "https://huggingface.co/datasets/muhammadravi251001/idkmrc-nli/raw/main/idk-mrc_nli_test_df.csv",
}

_SUPPORTED_TASKS = [Tasks.TEXTUAL_ENTAILMENT]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class IDKMRCNLIDataset(datasets.GeneratorBasedBuilder):
    """
    The IDKMRC-NLI dataset is derived from the IDK-MRC \
    question answering dataset, using named \
    entity recognition (NER), chunking tags, \
    regular expressions, and embedding similarity \
    to construct its contradiction sets. \
    The resulting dataset comprises several columns beyond \
    premise, hypothesis, and label, including \
    properties aligned with the NER and chunking tags. \
    The dataset is designed to support Natural \
    Language Inference (NLI) tasks and contains \
    information extracted from diverse sources \
    for broad coverage. Each data \
    instance holds a premise, a hypothesis, a label, \
    and additional properties relevant to NLI evaluation.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}",
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_pairs",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_pairs",
            subset_id=f"{_DATASETNAME}",
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
    labels = ["entailment", "neutral", "contradiction"]

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "premise": datasets.Value("string"),
                    "hypothesis": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=self.labels),
                }
            )
        elif self.config.schema == "seacrowd_pairs":
            features = schemas.pairs_features(self.labels)
        else:
            raise ValueError(f"Unexpected schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        train_path = dl_manager.download_and_extract(_URLS["train"])
        val_path = dl_manager.download_and_extract(_URLS["val"])
        test_path = dl_manager.download_and_extract(_URLS["test"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": train_path,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": test_path,
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": val_path,
                    "split": "val",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Iterator[Tuple[int, Dict]]:
        """Yields examples as (key, example) tuples."""
        if self.config.schema == "source":
            with open(filepath, encoding="utf-8") as csv_file:
                csv_reader = csv.DictReader(csv_file)
                for idx, row in enumerate(csv_reader):
                    yield idx, {"premise": row["premise"], "hypothesis": row["hypothesis"], "label": row["label"]}

        elif self.config.schema == "seacrowd_pairs":
            with open(filepath, encoding="utf-8") as csv_file:
                csv_reader = csv.DictReader(csv_file)
                for idx, row in enumerate(csv_reader):
                    # The SEACrowd pairs schema additionally expects an explicit string id for each pair.
                    yield idx, {"id": str(idx), "text_1": row["premise"], "text_2": row["hypothesis"], "label": row["label"]}
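
# A minimal usage sketch, assuming this script is saved locally as idk_mrc_nli.py and run
# with a `datasets` version that still supports script-based builders (depending on the
# installed version, `trust_remote_code=True` may be required). It is not needed by the
# SEACrowd framework itself.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, name=f"{_DATASETNAME}_source")
    print(dataset)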