import json

import datasets


_CITATION = """\
@inproceedings{sun-etal-2021-d2s,
    title = "{D}2{S}: Document-to-Slide Generation Via Query-Based Text Summarization",
    author = "Sun, Edward and
      Hou, Yufang and
      Wang, Dakuo and
      Zhang, Yunfeng and
      Wang, Nancy X. R.",
    booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
    month = jun,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.naacl-main.111",
    doi = "10.18653/v1/2021.naacl-main.111",
    pages = "1405--1418",
}
"""

_DESCRIPTION = """\
SciDuet is the first publicly available dataset for the challenging task of document-to-slides generation.
The dataset integrated into GEM is the ACL portion of the full dataset described in
"https://aclanthology.org/2021.naacl-main.111.pdf". It contains the full Dev and Test sets, and a portion
of the Train set. We additionally create a challenge set in which the slide titles do not match the
section headers of the corresponding paper.
Note that although we cannot release the whole training dataset due to copyright issues, researchers can
still use our released data procurement code from https://github.com/IBM/document2slides to generate the
training dataset from the online ICML/NeurIPS anthologies.
In the released dataset, the original papers and slides (both in PDF format) are carefully processed by a
combination of PDF/image processing toolkits. The text content of multiple slides that share the same
slide title is merged.
"""


class SciDuetConfig(datasets.BuilderConfig):
    """BuilderConfig for SciDuet."""

    def __init__(self, **kwargs):
        """BuilderConfig for SciDuet.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class SciDuet(datasets.GeneratorBasedBuilder):
    """Document-to-slides generation dataset (the GEM version of SciDuet)."""

    # Wire in the config class defined above; without this the builder would
    # silently fall back to the default BuilderConfig.
    BUILDER_CONFIG_CLASS = SciDuetConfig

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "gem_id": datasets.Value("string"),
                    "paper_id": datasets.Value("string"),
                    "paper_title": datasets.Value("string"),
                    "paper_abstract": datasets.Value("string"),
                    "paper_content": datasets.features.Sequence(
                        {
                            "paper_content_id": datasets.Value("int32"),
                            "paper_content_text": datasets.Value("string"),
                        }
                    ),
                    "paper_headers": datasets.features.Sequence(
                        {
                            "paper_header_number": datasets.Value("string"),
                            "paper_header_content": datasets.Value("string"),
                        }
                    ),
                    "slide_id": datasets.Value("string"),
                    "slide_title": datasets.Value("string"),
                    "slide_content_text": datasets.Value("string"),
                    "target": datasets.Value("string"),
                    "references": [datasets.Value("string")],
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/IBM/document2slides",
            license="Apache License 2.0",
            citation=_CITATION,
        )
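
    # Note on feature shapes: `datasets` stores a Sequence of dicts column-wise,
    # so a loaded example exposes, e.g.,
    #     example["paper_content"] == {"paper_content_id": [...],
    #                                  "paper_content_text": [...]}
    # which matches the dict-of-lists that `_generate_examples` yields below.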

    def _split_generators(self, dl_manager):
        # The data files live next to this script in the dataset repository
        # (https://huggingface.co/datasets/GEM/SciDuet/), so the download
        # manager resolves these relative paths against the repository itself.
        _URLs = {
            "train": "train.json",
            "validation": "validation.json",
            "test": "test.json",
            "challenge_set": "challenge_woSectionHeader.json",
        }
        downloaded_files = dl_manager.download_and_extract(_URLs)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": downloaded_files["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": downloaded_files["validation"],
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": downloaded_files["test"],
                    "split": "test",
                },
            ),
            # Challenge set: slide titles do not match the paper's section headers.
            datasets.SplitGenerator(
                name="challenge_woSectionHeader",
                gen_kwargs={
                    "filepath": downloaded_files["challenge_set"],
                    "split": "challenge_woSectionHeader",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)["data"]
            for item in data:
                gem_id = item["gem_id"]
                paper_id = item["paper_id"]
                paper_title = item["paper_title"]
                paper_abstract = item["paper"]["abstract"]
                paper_content_ids = [text["id"] for text in item["paper"]["text"]]
                paper_content_texts = [text["string"] for text in item["paper"]["text"]]
                paper_header_numbers = [header["n"] for header in item["paper"]["headers"]]
                paper_header_contents = [header["section"] for header in item["paper"]["headers"]]
                # Each paper is paired with several slides; emit one example per slide.
                for slide_id in item["slides"]:
                    id_ = f"{gem_id}#paper-{paper_id}#slide-{slide_id}"
                    slide_title = item["slides"][slide_id]["title"]
                    slide_content_text = "\n".join(item["slides"][slide_id]["text"])

                    yield id_, {
                        "gem_id": id_,
                        "paper_id": paper_id,
                        "paper_title": paper_title,
                        "paper_abstract": paper_abstract,
                        "paper_content": {
                            "paper_content_id": paper_content_ids,
                            "paper_content_text": paper_content_texts,
                        },
                        "paper_headers": {
                            "paper_header_number": paper_header_numbers,
                            "paper_header_content": paper_header_contents,
                        },
                        "slide_id": id_,
                        "slide_title": slide_title,
                        "slide_content_text": slide_content_text,
                        "target": slide_content_text,
                        # GEM convention: no references for the training split.
                        "references": [] if split == "train" else [slide_content_text],
                    }
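

# For reference, the generator above assumes input JSON shaped roughly like
# this (field names are taken from the accessors in `_generate_examples`; the
# nesting is inferred and the values are placeholders only):
#
# {
#   "data": [
#     {
#       "gem_id": "...",
#       "paper_id": "...",
#       "paper_title": "...",
#       "paper": {
#         "abstract": "...",
#         "text": [{"id": 0, "string": "..."}, ...],
#         "headers": [{"n": "...", "section": "..."}, ...]
#       },
#       "slides": {"<slide_id>": {"title": "...", "text": ["...", ...]}, ...}
#     },
#     ...
#   ]
# }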