# coding=utf-8
# Lint as: python3
"""IE-SemParse: Inter-Bilingual Semantic Parsing Corpus for Indic Languages."""

import os
import json

import datasets
_CITATION = """\
@misc{aggarwal2023evaluating,
    title={Evaluating Inter-Bilingual Semantic Parsing for Indian Languages},
    author={Divyanshu Aggarwal and Vivek Gupta and Anoop Kunchukuttan},
    year={2023},
    eprint={2304.13005},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
IE-SemParse is an inter-bilingual seq2seq semantic parsing dataset covering 11 distinct Indian languages.
"""
_LANGUAGES = (
    'hi',
    'bn',
    'mr',
    'as',
    'ta',
    'te',
    'or',
    'ml',
    'pa',
    'gu',
    'kn',
)

_DATASETS = (
    'itop',
    'indic-atis',
    'indic-TOP',
)
_URL = "https://huggingface.co/datasets/Divyanshu/IE-SemParse/resolve/main/"
class IESemParseConfig(datasets.BuilderConfig):
    """BuilderConfig for IE-SemParse."""

    def __init__(self, dataset: str, language: str, **kwargs):
        """BuilderConfig for IE-SemParse.

        Args:
            dataset: One of itop, indic-atis, indic-TOP.
            language: One of hi, bn, mr, as, ta, te, or, ml, pa, gu, kn.
            **kwargs: Keyword arguments forwarded to super.
        """
        super(IESemParseConfig, self).__init__(**kwargs)
        self.dataset = dataset
        self.language = language
        self.languages = _LANGUAGES
        self.datasets = _DATASETS
        # URL of the per-language JSON file for this (dataset, language) pair.
        self._URLS = [os.path.join(_URL, "unfiltered_data", dataset, f"{language}.json")]
class IESemParse(datasets.GeneratorBasedBuilder):
    """IE-SemParse: Inter-Bilingual Semantic Parsing Dataset for Indic Languages. Version 1.0."""

    VERSION = datasets.Version("1.0.0", "")
    BUILDER_CONFIG_CLASS = IESemParseConfig
    # One config per (dataset, language) pair, e.g. "itop_hi".
    BUILDER_CONFIGS = [
        IESemParseConfig(
            name=f"{dataset}_{language}",
            language=language,
            dataset=dataset,
            version=datasets.Version("1.0.0", ""),
            description=f"Plain text import of IE-SemParse for the {language} language for {dataset} dataset",
        )
        for dataset in _DATASETS
        for language in _LANGUAGES
    ]
    def _info(self):
        # Download one data file up front to infer the feature schema
        # (every field is stored as a plain string).
        dl_manager = datasets.DownloadManager()
        urls_to_download = self.config._URLS
        filepath = dl_manager.download_and_extract(urls_to_download)[0]

        with open(filepath, "r") as f:
            data = json.load(f)

        data = data[list(data.keys())[0]]

        features = datasets.Features(
            {k: datasets.Value("string") for k in data[0].keys()}
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # No default supervised_keys, since the input/output fields are
            # read dynamically from the data file.
            supervised_keys=None,
            homepage="https://github.com/divyanshuaggarwal/IE-SemParse",
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        urls_to_download = self.config._URLS
        downloaded_file = dl_manager.download_and_extract(urls_to_download)[0]

        # The gen_kwargs keys must match the parameter names of _generate_examples.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split_key": "train",
                    "filepath": downloaded_file,
                    "data_format": "IE-SemParse",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "split_key": "test",
                    "filepath": downloaded_file,
                    "data_format": "IE-SemParse",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "split_key": "val",
                    "filepath": downloaded_file,
                    "data_format": "IE-SemParse",
                },
            ),
        ]
    def _generate_examples(self, data_format, split_key, filepath):
        """Yields the examples in raw (text) form for the requested split."""
        with open(filepath, "r") as f:
            data = json.load(f)

        # The JSON file maps split names ("train", "test", "val") to lists of
        # records; each record is a flat dict of string fields.
        data = data[split_key]

        for idx, row in enumerate(data):
            yield idx, dict(row)
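

# Example usage (a minimal sketch, assuming this script is hosted at
# "Divyanshu/IE-SemParse" on the Hugging Face Hub and that the config name
# follows the f"{dataset}_{language}" pattern above, e.g. "itop_hi" for the
# iTOP data in Hindi):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("Divyanshu/IE-SemParse", "itop_hi")
#     print(ds["train"][0])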