# webnlg-qa/webnlgqa.py
import json

import datasets

_CITATION = """\
@inproceedings{lecorve2022sparql2text,
  title={SPARQL-to-Text Question Generation for Knowledge-Based Conversational Applications},
  author={Lecorv\'e, Gw\'enol\'e and Veyret, Morgan and Brabant, Quentin and Rojas-Barahona, Lina M.},
  booktitle={Proceedings of the Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the International Joint Conference on Natural Language Processing (AACL-IJCNLP)},
  year={2022}
}
"""
_HOMEPAGE = ""
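# The values below are file names relative to the dataset repository; the
# download manager resolves them against the location of this script (on the
# Hub or in a local directory) in _split_generators below.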
_URLS = {
"train": "train.json",
"dev": "dev.json",
"test": "test.json",
"challenge": "challenge.json"
}
_DESCRIPTION = """\
Augmented version of WebNLG v3.0 English, extended with follow-up SPARQL queries and their associated answer(s). A small portion of it also contains natural language questions associated with the queries.
"""
class WebNLGQA(datasets.GeneratorBasedBuilder):
"""
WebNLG-QA: Augmented version of WebNLG v3.0 English with follow-up SPARQL queries with their associated answer(s). A small portion of it also contains natural language questions associated with the queries.
"""
VERSION = datasets.Version("1.0.0")
def _info(self):
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
            # Declares the features (schema) of each example.
features=datasets.Features(
{
"category": datasets.Value("string"),
"size": datasets.Value("int32"),
"id": datasets.Value("string"),
"eid": datasets.Value("string"),
"original_triple_sets": [
{"subject": datasets.Value("string"),
"property": datasets.Value("string"),
"object": datasets.Value("string")}
],
"modified_triple_sets": [
{"subject": datasets.Value("string"),
"property": datasets.Value("string"),
"object": datasets.Value("string")}
],
"shape": datasets.Value("string"),
"shape_type": datasets.Value("string"),
"lex": datasets.Sequence(
{
"comment": datasets.Value("string"),
"lid": datasets.Value("string"),
"text": datasets.Value("string"),
"lang": datasets.Value("string"),
}
),
"test_category": datasets.Value("string"),
"dbpedia_links": datasets.Sequence(datasets.Value("string")),
"links": datasets.Sequence(datasets.Value("string")),
"graph": [
[datasets.Value("string")]
],
"main_entity": datasets.Value("string"),
"mappings": [
{
"modified": datasets.Value("string"),
"readable": datasets.Value("string"),
"graph": datasets.Value("string")
}
],
"dialogue": [
{
"question": [ {
"source": datasets.Value("string"),
"text": datasets.Value("string")
}],
"graph_query": datasets.Value("string"),
"readable_query": datasets.Value("string"),
"graph_answer": [
datasets.Value("string")
],
"readable_answer": [
datasets.Value("string")
],
"type": [ datasets.Value("string") ]
}
]
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# Downloads the data and defines the splits
# dl_manager is a datasets.download.DownloadManager that can be used to
# download and extract URLs
paths = dl_manager.download_and_extract(_URLS)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": paths['train'],
"split": "train"}
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"filepath": paths['dev'],
"split": "dev"}
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"filepath": paths['test'],
"split": "test"}
),
datasets.SplitGenerator(
name="challenge",
gen_kwargs={"filepath": paths['challenge'],
"split": "challenge"}
)
]
def _generate_examples(self, filepath, split):
"""Yields examples."""
def transform_sample(original_sample):
transformed_sample = {
"category": "",
"size": -1,
"id": "",
"eid": "",
"original_triple_sets": [],
"modified_triple_sets": [],
"shape": "",
"shape_type": "",
"lex": [],
"test_category": "",
"dbpedia_links": [],
"links": [],
"graph": [],
"main_entity": "",
"mappings": [],
"dialogue": []
}
            # Rename raw WebNLG keys to the feature names declared in _info().
            for (old_key, new_key) in [
                ("modifiedtripleset", "modified_triple_sets"),
                ("originaltriplesets", "original_triple_sets"),
                ("dbpedialinks", "dbpedia_links"),
                ("lexicalisations", "lex"),
                ("xml_id", "eid"),
            ]:
                original_sample[new_key] = original_sample[old_key]
                del original_sample[old_key]
            # Keep only the first original triple set.
            original_sample["original_triple_sets"] = original_sample["original_triple_sets"]["originaltripleset"][0]
            # Normalise lexicalisation entries: "xml_id" -> "lid", "lex" -> "text".
            for lexicalisation in original_sample["lex"]:
                lexicalisation["lid"] = lexicalisation["xml_id"]
                del lexicalisation["xml_id"]
                lexicalisation["text"] = lexicalisation["lex"]
                del lexicalisation["lex"]
            # Reshape each dialogue question from a {source: text} mapping into a
            # list of {"source": ..., "text": ...} dicts, as declared in _info().
            for turn in original_sample["dialogue"]:
                if "question" in turn:
                    old_format = turn["question"]
                    new_format = []
                    for source, text in old_format.items():
                        new_format.append({"source": source, "text": text})
                    turn["question"] = new_format
            # Copy over only the keys declared in the feature schema.
            for k in transformed_sample:
                if k in original_sample:
                    transformed_sample[k] = original_sample[k]
# transformed_sample.update(original_sample)
return transformed_sample
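        # Illustrative sketch with hypothetical values: a raw dialogue turn such as
        #     {"question": {"human": "Who founded it?"}, ...}
        # comes out of transform_sample() as
        #     {"question": [{"source": "human", "text": "Who founded it?"}], ...}
        # matching the "dialogue" feature declared in _info().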
        # Yields (key, example) tuples from the dataset
        with open(filepath, "r", encoding="utf-8") as f:
            data = json.load(f)
        for key, sample in enumerate(data):
            yield key, transform_sample(sample)
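
# Minimal usage sketch (not part of the loading script itself). Assumption: this
# script and the four JSON files listed in _URLS sit in the same directory, so
# the loader can be pointed at the script file directly. Depending on the
# installed version of the datasets library, trust_remote_code=True may also be
# required when loading from a script.
if __name__ == "__main__":
    webnlg_qa = datasets.load_dataset(__file__)
    print(webnlg_qa)  # DatasetDict with train/validation/test/challenge splits
    print(webnlg_qa["train"][0]["dialogue"])  # follow-up queries and answers for one entry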