Update fs.py
fs.py
CHANGED
@@ -6,7 +6,7 @@ import json
 import os
 import datasets
 from datasets import load_dataset
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer  # TODO comment out when getting rid of __main__:
 
 _FS_CITATION = """
 TBD
@@ -24,14 +24,14 @@ For SCROLLS, we use the ForeverDreaming (FD) subset, as it incorporates 88 diffe
 making it a more diverse alternative to the TV MegaSite (TMS) subset, which has only 10 shows.
 Community-authored recaps for the ForeverDreaming transcripts were collected from English Wikipedia and TVMaze."""
 
-
 _GOV_REPORT_DESCRIPTION = """
 GovReport (Huang et al., 2021) is a summarization dataset of reports addressing various national policy issues published by the
 Congressional Research Service and the U.S. Government Accountability Office, where each document is paired with a hand-written executive summary.
 The reports and their summaries are longer than their equivalents in other popular long-document summarization datasets;
 for example, GovReport's documents are approximately 1.5 and 2.5 times longer than the documents in Arxiv and PubMed, respectively."""
 
-
+_ARXIV_DESCRIPTION = """
+"""
 
 _SUMM_SCREEN_CITATION = r"""
 @misc{chen2021summscreen,
@@ -62,11 +62,16 @@ _GOV_REPORT_CITATION = r"""
     abstract = "The quadratic computational and memory complexities of large Transformers have limited their scalability for long document summarization. In this paper, we propose Hepos, a novel efficient encoder-decoder attention with head-wise positional strides to effectively pinpoint salient information from the source. We further conduct a systematic study of existing efficient self-attentions. Combined with Hepos, we are able to process ten times more tokens than existing models that use full attentions. For evaluation, we present a new dataset, GovReport, with significantly longer documents and summaries. Results show that our models produce significantly higher ROUGE scores than competitive comparisons, including new state-of-the-art results on PubMed. Human evaluation also shows that our models generate more informative summaries with fewer unfaithful errors.",
 }"""
 
+_ARXIV_CITATION = r"""
+}"""
+
+SUMM_PROMPT = "Summary: "
+
 
 class FSConfig(datasets.BuilderConfig):
     """BuilderConfig for FS."""
 
-    def __init__(self, features, data_url, citation, url, max_source_length, tokenizer,
+    def __init__(self, features, data_url, citation, url, max_source_length, tokenizer, **kwargs):
         """BuilderConfig for FS.
         Args:
           features: `list[string]`, list of the features that will appear in the
@@ -86,27 +91,80 @@ class FSConfig(datasets.BuilderConfig):
         self.url = url
         self.max_source_length = max_source_length
         self.tokenizer = tokenizer
-        self.prompt =
+        self.prompt = None
+        self.input_key = None
+        self.output_key = None
+        self.redundant_fields = []
+
+        self.train_file = "train.jsonl"
+        self.validation_file = "validation.jsonl"
+        self.test_file = "test.jsonl"
+
+    def remove_redundant_fields(self, example):
+        for field in self.redundant_fields:
+            del example[field]
+
+    def process_input(self, s):
+        return s.strip()
+
+    def process_output(self, s):
+        return s
+
+
+class ScrollsConfig(FSConfig):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.prompt = SUMM_PROMPT
+
+        self.train_file = "train.jsonl"
+        self.validation_file = "validation.jsonl"
+        self.test_file = "test.jsonl"
+
+        self.input_key = "input"
+        self.output_key = "output"
+        self.id_key = "pid"
+        self.redundant_fields = [self.input_key, self.output_key, "id"]
+
+
+class ArxivConfig(FSConfig):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.prompt = SUMM_PROMPT
+
+        self.train_file = "train.txt"
+        self.validation_file = "val.txt"
+        self.test_file = "test.txt"
+
+        self.input_key = "article_text"
+        self.output_key = "abstract_text"
+        self.id_key = "article_id"
+        self.redundant_fields = [self.input_key, self.output_key, self.id_key, 'labels', 'section_names', 'sections']
+
+    def process_input(self, s):
+        return ' '.join(s)
+
+    def process_output(self, s):
+        # TODO remove "<S>" and "</S>" ?
+        return self.process_input(s)
 
 
 class Fs(datasets.GeneratorBasedBuilder):
     """The SCROLLS benchmark."""
 
-    features = ["
+    features = ["pid", "source", "target"]
     DEFAULT_WRITER_BATCH_SIZE = 1000  # because Narrative QA is a rather large dataset
     BUILDER_CONFIGS = [
-
-            name="
+        ScrollsConfig(
+            name="summ_screen_fd_debug",
             description=_SUMM_SCREEN_DESCRIPTION,
             features=features,
-            data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/
+            data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/summ_screen_fd_debug.zip",
             citation=_SUMM_SCREEN_CITATION,
             url="https://github.com/mingdachen/SummScreen",
            max_source_length=None,
            tokenizer=None,
-            prompt=None
        ),
-
+        ScrollsConfig(
            name="gov_report",
            description=_GOV_REPORT_CITATION,
            features=features,
@@ -115,7 +173,16 @@ class Fs(datasets.GeneratorBasedBuilder):
            url="https://gov-report-data.github.io/",
            max_source_length=None,
            tokenizer=None,
-
+        ),
+        ArxivConfig(
+            name="arxiv_debug",
+            description=_ARXIV_CITATION,
+            features=features,
+            data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/arxiv_debug.zip",
+            citation=_ARXIV_DESCRIPTION,
+            url="https://github.com/armancohan/long-summarization",
+            max_source_length=None,
+            tokenizer=None,
        ),
    ]
 
@@ -141,30 +208,28 @@
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
-                    "data_file": os.path.join(dl_dir,
+                    "data_file": os.path.join(dl_dir, self.config.train_file),
                    "split": datasets.Split.TRAIN,
                    "max_source_length": self.config.max_source_length,
-                    "prompt": self.config.prompt,
                    "tokenizer": self.config.tokenizer,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
-                    "data_file": os.path.join(dl_dir,
+                    "data_file": os.path.join(dl_dir, self.config.validation_file),
                    "split": datasets.Split.VALIDATION,
                    "max_source_length": self.config.max_source_length,
-                    "prompt": self.config.prompt,
                    "tokenizer": self.config.tokenizer,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
-                    "data_file": os.path.join(dl_dir,
+                    "data_file": os.path.join(dl_dir, self.config.test_file) if data_files is None else data_files[
+                        "test"],
                    "split": datasets.Split.TEST,
                    "max_source_length": self.config.max_source_length,
-                    "prompt": self.config.prompt,
                    "tokenizer": self.config.tokenizer,
                },
            ),
@@ -174,7 +239,8 @@
        with open(data_file, encoding="utf-8") as f:
            for line in f:
                row = json.loads(line)
-
+
+                prefix = self.config.process_input(row[self.config.input_key])
                suffix = "\n" + self.config.prompt
                encoded_input = tokenizer.encode(prefix + suffix)
 
@@ -188,9 +254,12 @@
                prefix = tokenizer.decode(tokenized_prefix, skip_special_tokens=False).strip()
                encoded_input = tokenizer.encode(prefix + suffix)
 
-                row["
-                row["
+                row["pid"] = row[self.config.id_key]
+                row["source"] = prefix + suffix
+                target = row[self.config.output_key]
+                row["target"] = self.config.process_output(target)
 
+                self.config.remove_redundant_fields(row)
                yield row["pid"], row
 
 
@@ -200,7 +269,11 @@ def _get_task_name_from_data_url(data_url):
 
 if __name__ == '__main__':
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
-    dataset = load_dataset("tau/fs",name="summ_screen_fd", max_source_length=512, tokenizer=tokenizer, prompt="Summary:")
+    # dataset = load_dataset("tau/fs",name="summ_screen_fd", max_source_length=512, tokenizer=tokenizer, prompt="Summary:")
+    ssfd_debug = load_dataset("/Users/uri/Projects/fs/fs.py", name="summ_screen_fd_debug", max_source_length=512,
+                              tokenizer=tokenizer)
+    arxiv_debug = load_dataset("/Users/uri/Projects/fs/fs.py", name="arxiv_debug", max_source_length=512,
+                               tokenizer=tokenizer, prompt="Summarize the above:")
    x = 5
    # builder = Scrolls("scrolls", "summ_screen_fd")
    # builder.download_and_prepare()