import json
import os
import datasets
_DESCRIPTION = """
WCEP10 dataset for summarization.
From paper: "A Large-Scale Multi-Document Summarization Dataset from the Wikipedia
Current Events Portal" by D. Gholipour et al."
From paper: "PRIMER: Pyramid-based Masked Sentence Pre-training for Multi-document
Summarization" by W. Xiao et al."
"""
_CITATION = """\
@article{DBLP:journals/corr/abs-2005-10070,
author = {Demian Gholipour Ghalandari and
Chris Hokamp and
Nghia The Pham and
John Glover and
Georgiana Ifrim},
title = {A Large-Scale Multi-Document Summarization Dataset from the Wikipedia
Current Events Portal},
journal = {CoRR},
volume = {abs/2005.10070},
year = {2020},
url = {https://arxiv.org/abs/2005.10070},
eprinttype = {arXiv},
eprint = {2005.10070},
timestamp = {Fri, 22 May 2020 16:21:28 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-2005-10070.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{DBLP:journals/corr/abs-2110-08499,
author = {Wen Xiao and
Iz Beltagy and
Giuseppe Carenini and
Arman Cohan},
title = {{PRIMER:} Pyramid-based Masked Sentence Pre-training for Multi-document
Summarization},
journal = {CoRR},
volume = {abs/2110.08499},
year = {2021},
url = {https://arxiv.org/abs/2110.08499},
eprinttype = {arXiv},
eprint = {2110.08499},
timestamp = {Fri, 22 Oct 2021 13:33:09 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-2110-08499.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
_ABSTRACT = "summary"
_ARTICLE = "document"


class WCEP10SummarizationConfig(datasets.BuilderConfig):
    """BuilderConfig for WCEP10Summarization."""

    def __init__(self, **kwargs):
        """BuilderConfig for WCEP10Summarization.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class WCEP10SummarizationDataset(datasets.GeneratorBasedBuilder):
    """WCEP10Summarization Dataset."""

    # Archives that each extract to a single JSON-lines file.
    _TRAIN_FILE = "train.zip"
    _VAL_FILE = "val.zip"
    _TEST_FILE = "test.zip"

    BUILDER_CONFIGS = [
        WCEP10SummarizationConfig(
            name="newline",
            version=datasets.Version("1.0.0"),
            description="WCEP10 dataset for summarization, source documents joined with newlines",
        ),
        WCEP10SummarizationConfig(
            name="roberta",
            version=datasets.Version("1.0.0"),
            description="WCEP10 dataset for summarization, source documents joined with the RoBERTa separator </s>",
        ),
        WCEP10SummarizationConfig(
            name="bert",
            version=datasets.Version("1.0.0"),
            description="WCEP10 dataset for summarization, source documents joined with the BERT separator [SEP]",
        ),
        WCEP10SummarizationConfig(
            name="list",
            version=datasets.Version("1.0.0"),
            description="WCEP10 dataset for summarization, source documents kept as a list of strings",
        ),
    ]

    DEFAULT_CONFIG_NAME = "roberta"

    def _info(self):
        # In the "list" config each example keeps its source documents as a
        # list of strings; every other config joins them into a single string.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    _ARTICLE: datasets.Sequence(datasets.Value("string"))
                    if self.config.name == "list"
                    else datasets.Value("string"),
                    _ABSTRACT: datasets.Value("string"),
                    # "id": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/allenai/PRIMER",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Each zip archive extracts to a directory holding one JSON-lines
        # file (train.txt / val.txt / test.txt).
        train_path = os.path.join(dl_manager.download_and_extract(self._TRAIN_FILE), "train.txt")
        val_path = os.path.join(dl_manager.download_and_extract(self._VAL_FILE), "val.txt")
        test_path = os.path.join(dl_manager.download_and_extract(self._TEST_FILE), "test.txt")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
            ),
        ]

    def _generate_examples(self, filepath):
        """Generate WCEP10Summarization examples."""
        # Separator used to join the source documents into one string; the
        # "list" config skips joining, so join_ is intentionally left unset.
        if self.config.name == "newline":
            join_ = "\n"
        elif self.config.name == "roberta":
            join_ = "</s>"
        elif self.config.name == "bert":
            join_ = "[SEP]"

        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                # Each line is one JSON object:
                #   'summary': str,
                #   'document': List[str],
                data = json.loads(row)
                document = data["document"]
                if self.config.name != "list":
                    document = join_.join(document)
                summary = data["summary"]
                yield id_, {"document": document, "summary": summary}
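
# Minimal usage sketch. This assumes the script is saved locally as
# "wcep10_summarization.py" with train.zip / val.zip / test.zip alongside it;
# the file name and data locations are assumptions, not part of this script.
#
#     import datasets
#     ds = datasets.load_dataset("wcep10_summarization.py", "roberta")
#     print(ds["train"][0]["summary"])
#     print(ds["train"][0]["document"][:200])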