Datasets:
parquet-converter committed on
Commit 14c0afe
1 Parent(s): 9b3a92f
Update parquet files

Browse files
- .gitattributes +0 -27
- AIC/scitldr-test.parquet +3 -0
- AIC/scitldr-train.parquet +3 -0
- AIC/scitldr-validation.parquet +3 -0
- Abstract/scitldr-test.parquet +3 -0
- Abstract/scitldr-train.parquet +3 -0
- Abstract/scitldr-validation.parquet +3 -0
- FullText/scitldr-test.parquet +3 -0
- FullText/scitldr-train.parquet +3 -0
- FullText/scitldr-validation.parquet +3 -0
- README.md +0 -305
- dataset_infos.json +0 -1
- scitldr.py +0 -169
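The files added by this commit are Parquet exports of the three SciTLDR configurations (Abstract, AIC, FullText), one file per split. As a quick check, a split can be read directly with pandas; the sketch below assumes the dataset repository id is `scitldr` and that the converted files live on the Parquet conversion branch, both of which are assumptions to adjust if they differ.

```python
# Minimal sketch, assuming the repo id "scitldr" and the refs/convert/parquet
# revision; both are assumptions, not taken from this commit page.
import pandas as pd
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="scitldr",                    # assumed dataset repo id
    filename="AIC/scitldr-test.parquet",  # one of the files added below
    repo_type="dataset",
    revision="refs/convert/parquet",      # assumed branch for converted files
)

df = pd.read_parquet(path)
print(df.columns.tolist())  # expected: source, source_labels, rouge_scores, paper_id, ic, target
print(len(df))
```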
.gitattributes
DELETED
@@ -1,27 +0,0 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
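The deleted `.gitattributes` routed the file patterns above through Git LFS. The sketch below only illustrates which file names those patterns cover; `fnmatch` is an approximation of gitattributes matching, and the pattern list is an excerpt from the file above.

```python
# Illustration only: approximate the deleted .gitattributes rules with fnmatch.
from fnmatch import fnmatch

lfs_patterns = ["*.7z", "*.arrow", "*.bin", "*.parquet", "*.zip", "*tfevents*"]  # excerpt

def routed_through_lfs(filename: str) -> bool:
    return any(fnmatch(filename, pattern) for pattern in lfs_patterns)

print(routed_through_lfs("AIC/scitldr-test.parquet"))  # True: *.parquet is LFS-tracked
print(routed_through_lfs("README.md"))                 # False
```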
AIC/scitldr-test.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fe2ea3bf6e17da6835f801e38abd4d66f6ce55f617e5efdcbde3cf25f8691163
size 2420420
AIC/scitldr-train.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bed1d1dcde67bb575f8951b78f1b3b375ab82cef4ede2b9ba6b832741f45f8e4
size 7225242
AIC/scitldr-validation.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:06260d11e4edd5c736e939e747ea52079250accea0e95cf9a34e9489257ca164
size 2246859
Abstract/scitldr-test.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b1ecde965d8a5a0fa21872e43ffc402169669c2485edcfa6e2f401b09fcfe735
size 569390
Abstract/scitldr-train.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5dd84670937fc827083fa628172c0a0533ee2682e2de26dda74d57c99e6bf1bd
size 1462133
Abstract/scitldr-validation.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:830c859b310797d32d6e45ff3f9bd57d3badb4c937ad75623159ecc23d5dd4da
size 529207
FullText/scitldr-test.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:01943acff58815c010834dd5a14c1b33cda1f8d64350d089db108ef1b72fc1c8
size 10345754
FullText/scitldr-train.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8397c92d09f158f1d979dcbeae4d7fbc6d5ee45eaf6a4a04bb93adce626c70ab
size 32345466
FullText/scitldr-validation.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:23ba62cea503284c8e8cd16615d94f948facac26037d82b685e68cba46bfb1c0
size 9642576
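Each `ADDED` entry above is a three-line Git LFS pointer (spec version, sha256 `oid`, byte `size`) rather than the Parquet payload itself. The sketch below parses such a pointer and verifies a downloaded copy against it; the local file names are hypothetical and only serve as an illustration.

```python
# Sketch: parse a Git LFS pointer and verify a local download against it.
import hashlib
import os

def parse_lfs_pointer(text: str) -> dict:
    # Pointer lines look like "oid sha256:<hex>" and "size <bytes>".
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {"oid": fields["oid"].split(":", 1)[1], "size": int(fields["size"])}

def matches_pointer(local_path: str, pointer: dict) -> bool:
    if os.path.getsize(local_path) != pointer["size"]:
        return False
    with open(local_path, "rb") as f:
        return hashlib.sha256(f.read()).hexdigest() == pointer["oid"]

# Hypothetical local copies of a pointer stub and the resolved Parquet file.
pointer = parse_lfs_pointer(open("scitldr-test.pointer").read())
print(matches_pointer("scitldr-test.parquet", pointer))
```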
README.md
DELETED
@@ -1,305 +0,0 @@
---
annotations_creators:
- no-annotation
language_creators:
- found
language:
- en
license:
- unknown
multilinguality:
- monolingual
size_categories:
- 1K<n<10K
source_datasets:
- original
task_categories:
- summarization
task_ids: []
paperswithcode_id: scitldr
pretty_name: SciTLDR
tags:
- scientific-documents-summarization
dataset_info:
- config_name: Abstract
  features:
  - name: source
    sequence: string
  - name: source_labels
    sequence:
      class_label:
        names:
          0: non-oracle
          1: oracle
  - name: rouge_scores
    sequence: float32
  - name: paper_id
    dtype: string
  - name: target
    sequence: string
  splits:
  - name: train
    num_bytes: 2738065
    num_examples: 1992
  - name: test
    num_bytes: 1073656
    num_examples: 618
  - name: validation
    num_bytes: 994876
    num_examples: 619
  download_size: 5483987
  dataset_size: 4806597
- config_name: AIC
  features:
  - name: source
    sequence: string
  - name: source_labels
    sequence:
      class_label:
        names:
          0: 0
          1: 1
  - name: rouge_scores
    sequence: float32
  - name: paper_id
    dtype: string
  - name: ic
    dtype: bool_
  - name: target
    sequence: string
  splits:
  - name: train
    num_bytes: 14473822
    num_examples: 1992
  - name: test
    num_bytes: 4822026
    num_examples: 618
  - name: validation
    num_bytes: 4476237
    num_examples: 619
  download_size: 25545108
  dataset_size: 23772085
- config_name: FullText
  features:
  - name: source
    sequence: string
  - name: source_labels
    sequence:
      class_label:
        names:
          0: non-oracle
          1: oracle
  - name: rouge_scores
    sequence: float32
  - name: paper_id
    dtype: string
  - name: target
    sequence: string
  splits:
  - name: train
    num_bytes: 66917363
    num_examples: 1992
  - name: test
    num_bytes: 20182554
    num_examples: 618
  - name: validation
    num_bytes: 18790651
    num_examples: 619
  download_size: 110904552
  dataset_size: 105890568
---
# Dataset Card for SciTLDR

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Homepage:** https://github.com/allenai/scitldr
- **Repository:** https://github.com/allenai/scitldr
- **Paper:** https://arxiv.org/abs/2004.15011
- **Leaderboard:**
- **Point of Contact:** {isabelc,kylel,armanc,danw}@allenai.org

### Dataset Summary

`SciTLDR`: Extreme Summarization of Scientific Documents

SciTLDR is a new multi-target dataset of 5.4K TLDRs over 3.2K papers. SciTLDR contains both author-written and expert-derived TLDRs, where the latter are collected using a novel annotation protocol that produces high-quality summaries while minimizing annotation burden.

### Supported Tasks and Leaderboards

summarization

### Languages

English

## Dataset Structure

SciTLDR is split into a 60/20/20 train/dev/test split. In each file, every line is a JSON object formatted as follows:

```
{
  "source": [
    "sent0",
    "sent1",
    "sent2",
    ...
  ],
  "source_labels": [binary list in which 1 is the oracle sentence],
  "rouge_scores": [precomputed rouge-1 scores],
  "paper_id": "PAPER-ID",
  "target": [
    "author-tldr",
    "pr-tldr0",
    "pr-tldr1",
    ...
  ],
  "title": "TITLE"
}
```
The keys `rouge_scores` and `source_labels` are not required to run any of the code; the precomputed ROUGE scores are provided for future research.

### Data Instances

{
  "source": [
    "Mixed precision training (MPT) is becoming a practical technique to improve the speed and energy efficiency of training deep neural networks by leveraging the fast hardware support for IEEE half-precision floating point that is available in existing GPUs.",
    "MPT is typically used in combination with a technique called loss scaling, that works by scaling up the loss value up before the start of backpropagation in order to minimize the impact of numerical underflow on training.",
    "Unfortunately, existing methods make this loss scale value a hyperparameter that needs to be tuned per-model, and a single scale cannot be adapted to different layers at different training stages.",
    "We introduce a loss scaling-based training method called adaptive loss scaling that makes MPT easier and more practical to use, by removing the need to tune a model-specific loss scale hyperparameter.",
    "We achieve this by introducing layer-wise loss scale values which are automatically computed during training to deal with underflow more effectively than existing methods.",
    "We present experimental results on a variety of networks and tasks that show our approach can shorten the time to convergence and improve accuracy, compared with using the existing state-of-the-art MPT and single-precision floating point."
  ],
  "source_labels": [0, 0, 0, 1, 0, 0],
  "rouge_scores": [0.2399999958000001, 0.26086956082230633, 0.19999999531250012, 0.38095237636054424, 0.2051282003944774, 0.2978723360796741],
  "paper_id": "rJlnfaNYvB",
  "target": [
    "We devise adaptive loss scaling to improve mixed precision training that surpass the state-of-the-art results.",
    "Proposal for an adaptive loss scaling method during backpropagation for mix precision training where scale rate is decided automatically to reduce the underflow.",
    "The authors propose a method to train models in FP16 precision that adopts a more elaborate way to minimize underflow in every layer simultaneously and automatically."
  ],
  "title": "Adaptive Loss Scaling for Mixed Precision Training"
}

### Data Fields

- `source`: the Abstract, Introduction and Conclusion (AIC) or full text of the paper, one sentence per line.
- `source_labels`: binary 0 or 1; 1 denotes an oracle sentence.
- `rouge_scores`: precomputed ROUGE baseline scores for each sentence.
- `paper_id`: arXiv paper ID.
- `target`: multiple target summaries (TLDRs) for each paper, one summary per line.
- `title`: title of the paper.

### Data Splits

|                  | train | valid | test |
|------------------|-------|-------|------|
| SciTLDR-A        | 1992  | 619   | 618  |
| SciTLDR-AIC      | 1992  | 619   | 618  |
| SciTLDR-FullText | 1992  | 619   | 618  |

## Dataset Creation

[More Information Needed]

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

https://allenai.org/

### Annotations

#### Annotation process

Given the title and first 128 words of a reviewer comment about a paper, re-write the summary (if it exists) into a single sentence or an incomplete phrase. Summaries must be no more than one sentence. Most summaries are between 15 and 25 words; the average rewritten summary is 20 words long.

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

To encourage further research in the area of extreme summarization of scientific documents.

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

Apache License 2.0

### Citation Information

@article{cachola2020tldr,
  title={{TLDR}: Extreme Summarization of Scientific Documents},
  author={Isabel Cachola and Kyle Lo and Arman Cohan and Daniel S. Weld},
  journal={arXiv:2004.15011},
  year={2020},
}

### Contributions

Thanks to [@Bharat123rox](https://github.com/Bharat123rox) for adding this dataset.
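The deleted card describes each record as a list of source sentences with oracle labels plus one or more target TLDRs. Below is a small sketch of walking one such record, assuming a local copy of a split file at a hypothetical path:

```python
# Sketch over the record format documented in the deleted README.
# The local path to the JSONL split is a hypothetical example.
import json

with open("SciTLDR-AIC/train.jsonl", encoding="utf-8") as f:
    example = json.loads(next(f))

# source_labels marks oracle sentences with 1
oracle = [sent for sent, label in zip(example["source"], example["source_labels"]) if label == 1]
print(example["paper_id"])
print(oracle[:1])          # best extractive sentence per the precomputed labels
print(example["target"])   # author TLDR plus rewritten peer-review TLDRs
```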
dataset_infos.json
DELETED
@@ -1 +0,0 @@
{"Abstract": {"description": "A new multi-target dataset of 5.4K TLDRs over 3.2K papers.\nSCITLDR contains both author-written and expert-derived TLDRs,\nwhere the latter are collected using a novel annotation protocol\nthat produces high-quality summaries while minimizing annotation burden.\n", "citation": "@article{cachola2020tldr,\n title={{TLDR}: Extreme Summarization of Scientific Documents},\n author={Isabel Cachola and Kyle Lo and Arman Cohan and Daniel S. Weld},\n journal={arXiv:2004.15011},\n year={2020},\n}\n", "homepage": "https://github.com/allenai/scitldr", "license": "Apache License 2.0", "features": {"source": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "source_labels": {"feature": {"num_classes": 2, "names": ["non-oracle", "oracle"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "rouge_scores": {"feature": {"dtype": "float32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "paper_id": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": {"input": "source", "output": "target"}, "builder_name": "scitldr", "config_name": "Abstract", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 2738065, "num_examples": 1992, "dataset_name": "scitldr"}, "test": {"name": "test", "num_bytes": 1073656, "num_examples": 618, "dataset_name": "scitldr"}, "validation": {"name": "validation", "num_bytes": 994876, "num_examples": 619, "dataset_name": "scitldr"}}, "download_checksums": {"https://raw.githubusercontent.com/allenai/scitldr/master/SciTLDR-Data/SciTLDR-A/train.jsonl": {"num_bytes": 3155015, "checksum": "b222771d387be585cfdf5ae957b36757138415a352e0a3e3b23f73f87c3b1119"}, "https://raw.githubusercontent.com/allenai/scitldr/master/SciTLDR-Data/SciTLDR-A/dev.jsonl": {"num_bytes": 1124865, "checksum": "3191fa98ccc09521332b7a1cd63b1930be4e8df125a235ccd31e40329709525e"}, "https://raw.githubusercontent.com/allenai/scitldr/master/SciTLDR-Data/SciTLDR-A/test.jsonl": {"num_bytes": 1204107, "checksum": "fb42dd6cd4f4a1928ae8a01a189456fbfe994a07e938bd49f68653933f6503c9"}}, "download_size": 5483987, "post_processing_size": null, "dataset_size": 4806597, "size_in_bytes": 10290584}, "AIC": {"description": "A new multi-target dataset of 5.4K TLDRs over 3.2K papers.\nSCITLDR contains both author-written and expert-derived TLDRs,\nwhere the latter are collected using a novel annotation protocol\nthat produces high-quality summaries while minimizing annotation burden.\n", "citation": "@article{cachola2020tldr,\n title={{TLDR}: Extreme Summarization of Scientific Documents},\n author={Isabel Cachola and Kyle Lo and Arman Cohan and Daniel S. 
Weld},\n journal={arXiv:2004.15011},\n year={2020},\n}\n", "homepage": "https://github.com/allenai/scitldr", "license": "Apache License 2.0", "features": {"source": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "source_labels": {"feature": {"num_classes": 2, "names": [0, 1], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "rouge_scores": {"feature": {"dtype": "float32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "paper_id": {"dtype": "string", "id": null, "_type": "Value"}, "ic": {"dtype": "bool_", "id": null, "_type": "Value"}, "target": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": {"input": "source", "output": "target"}, "builder_name": "scitldr", "config_name": "AIC", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 14473822, "num_examples": 1992, "dataset_name": "scitldr"}, "test": {"name": "test", "num_bytes": 4822026, "num_examples": 618, "dataset_name": "scitldr"}, "validation": {"name": "validation", "num_bytes": 4476237, "num_examples": 619, "dataset_name": "scitldr"}}, "download_checksums": {"https://raw.githubusercontent.com/allenai/scitldr/master/SciTLDR-Data/SciTLDR-AIC/train.jsonl": {"num_bytes": 15569568, "checksum": "64b08af6de479671a12afd04770f66bcbc1c2c5f3098a08392b0fd7c1070d621"}, "https://raw.githubusercontent.com/allenai/scitldr/master/SciTLDR-Data/SciTLDR-AIC/dev.jsonl": {"num_bytes": 4811551, "checksum": "ac5168c27d25181fc17bb6f1fb41d11dbe30c627bebee14457feb3bad2c839dd"}, "https://raw.githubusercontent.com/allenai/scitldr/master/SciTLDR-Data/SciTLDR-AIC/test.jsonl": {"num_bytes": 5163989, "checksum": "7cb9230d3eb4863884762154918360d1c063aa18fc76de928801a14f4bcf4d37"}}, "download_size": 25545108, "post_processing_size": null, "dataset_size": 23772085, "size_in_bytes": 49317193}, "FullText": {"description": "A new multi-target dataset of 5.4K TLDRs over 3.2K papers.\nSCITLDR contains both author-written and expert-derived TLDRs,\nwhere the latter are collected using a novel annotation protocol\nthat produces high-quality summaries while minimizing annotation burden.\n", "citation": "@article{cachola2020tldr,\n title={{TLDR}: Extreme Summarization of Scientific Documents},\n author={Isabel Cachola and Kyle Lo and Arman Cohan and Daniel S. 
Weld},\n journal={arXiv:2004.15011},\n year={2020},\n}\n", "homepage": "https://github.com/allenai/scitldr", "license": "Apache License 2.0", "features": {"source": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "source_labels": {"feature": {"num_classes": 2, "names": ["non-oracle", "oracle"], "names_file": null, "id": null, "_type": "ClassLabel"}, "length": -1, "id": null, "_type": "Sequence"}, "rouge_scores": {"feature": {"dtype": "float32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "paper_id": {"dtype": "string", "id": null, "_type": "Value"}, "target": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": {"input": "source", "output": "target"}, "builder_name": "scitldr", "config_name": "FullText", "version": "0.0.0", "splits": {"train": {"name": "train", "num_bytes": 66917363, "num_examples": 1992, "dataset_name": "scitldr"}, "test": {"name": "test", "num_bytes": 20182554, "num_examples": 618, "dataset_name": "scitldr"}, "validation": {"name": "validation", "num_bytes": 18790651, "num_examples": 619, "dataset_name": "scitldr"}}, "download_checksums": {"https://raw.githubusercontent.com/allenai/scitldr/master/SciTLDR-Data/SciTLDR-FullText/train.jsonl": {"num_bytes": 71263949, "checksum": "e35461c1665cb4f7b46daba6dd5ac3cff03a61eb196e6ce9983edda44d867604"}, "https://raw.githubusercontent.com/allenai/scitldr/master/SciTLDR-Data/SciTLDR-FullText/dev.jsonl": {"num_bytes": 19111616, "checksum": "11c3fd77a7ec447adc44ca34c0fa41a7ab6bdacdf3b8e15748e6f8b8e4f698bf"}, "https://raw.githubusercontent.com/allenai/scitldr/master/SciTLDR-Data/SciTLDR-FullText/test.jsonl": {"num_bytes": 20528987, "checksum": "1584bd3f5fff5859cb8428cfbacc8d38c671f5fc6a24a8140ea5350cbd86a751"}}, "download_size": 110904552, "post_processing_size": null, "dataset_size": 105890568, "size_in_bytes": 216795120}}
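The deleted `dataset_infos.json` carried per-config features, split sizes and download checksums. Roughly the same metadata can be inspected through the `datasets` library; the repository id below is an assumption.

```python
# Sketch: read the per-config metadata programmatically instead of from
# the deleted dataset_infos.json. The repo id "scitldr" is an assumption.
from datasets import load_dataset_builder

builder = load_dataset_builder("scitldr", "Abstract")
print(builder.info.features)
print(builder.info.splits)  # train/test/validation example counts, if published
```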
scitldr.py
DELETED
@@ -1,169 +0,0 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset for TLDR: Extreme Summarization of Scientific Documents"""


import json
import os

import datasets


_SOURCE = "source"
_TARGET = "target"

_CITATION = """\
@article{cachola2020tldr,
  title={{TLDR}: Extreme Summarization of Scientific Documents},
  author={Isabel Cachola and Kyle Lo and Arman Cohan and Daniel S. Weld},
  journal={arXiv:2004.15011},
  year={2020},
}
"""

_DESCRIPTION = """\
A new multi-target dataset of 5.4K TLDRs over 3.2K papers.
SCITLDR contains both author-written and expert-derived TLDRs,
where the latter are collected using a novel annotation protocol
that produces high-quality summaries while minimizing annotation burden.
"""


_LICENSE = "Apache License 2.0"

# TODO: Add link to the official dataset URLs here
# The HuggingFace datasets library doesn't host the datasets but only points to the original files
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLs = {
    "Abstract": "https://raw.githubusercontent.com/allenai/scitldr/master/SciTLDR-Data/SciTLDR-A/",
    "AIC": "https://raw.githubusercontent.com/allenai/scitldr/master/SciTLDR-Data/SciTLDR-AIC/",
    "FullText": "https://raw.githubusercontent.com/allenai/scitldr/master/SciTLDR-Data/SciTLDR-FullText/",
}

_TRAIN_DATA = "train.jsonl"
_TEST_DATA = "test.jsonl"
_VALID_DATA = "dev.jsonl"


# There are several preprocessing scripts given in the original SciTLDR GitHub repository to preprocess this data.
class Scitldr(datasets.GeneratorBasedBuilder):
    """Dataset for TLDR: Extreme Summarization of Scientific Documents."""

    VERSION = datasets.Version("1.1.0")

    # You will be able to load one or the other configurations in the following list with
    # data = datasets.load_dataset('scitldr', 'Abstract')
    # data = datasets.load_dataset('scitldr', 'AIC')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="Abstract", description="This part contains only abstracts of the paper"),
        datasets.BuilderConfig(
            name="AIC",
            description="This part contains Abstracts, Introduction and Conclusion (AIC) sections of the paper",
        ),
        datasets.BuilderConfig(name="FullText", description="This part contains the full text of the paper"),
    ]

    DEFAULT_CONFIG_NAME = (
        "Abstract"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
    )

    def _info(self):
        if self.config.name == "AIC":  # This is the name of the configuration selected in BUILDER_CONFIGS above
            features = datasets.Features(
                {
                    "source": datasets.Sequence(datasets.Value("string")),
                    "source_labels": datasets.Sequence(datasets.ClassLabel(num_classes=2, names=[0, 1])),
                    "rouge_scores": datasets.Sequence(datasets.Value("float32")),
                    "paper_id": datasets.Value("string"),
                    "ic": datasets.Value("bool_"),
                    "target": datasets.features.Sequence(datasets.Value("string"))
                    # These are the features of your dataset like images, labels ...
                }
            )
        else:
            features = datasets.Features(
                {
                    "source": datasets.Sequence(datasets.Value("string")),
                    "source_labels": datasets.Sequence(
                        datasets.ClassLabel(num_classes=2, names=["non-oracle", "oracle"])
                    ),
                    "rouge_scores": datasets.Sequence(datasets.Value("float32")),
                    "paper_id": datasets.Value("string"),
                    "target": datasets.Sequence(datasets.Value("string"))
                    # These are the features of your dataset like images, labels ...
                }
            )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,  # Here we define them above because they are different between the two configurations
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=(_SOURCE, _TARGET),
            # Homepage of the dataset for documentation
            homepage="https://github.com/allenai/scitldr",
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls = {
            "train": _URLs[self.config.name] + _TRAIN_DATA,
            "valid": _URLs[self.config.name] + _VALID_DATA,
            "test": _URLs[self.config.name] + _TEST_DATA,
        }
        data_dir = dl_manager.download(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(data_dir["train"])},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": os.path.join(data_dir["test"])},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": os.path.join(data_dir["valid"])},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                if self.config.name == "AIC":
                    yield id_, {
                        "source": data["source"],
                        "source_labels": data["source_labels"],
                        "rouge_scores": data["rouge_scores"],
                        "paper_id": data["paper_id"],
                        "ic": True if data["ic"] else False,
                        "target": data["target"],
                    }
                else:
                    yield id_, {
                        "source": data["source"],
                        "source_labels": data["source_labels"],
                        "rouge_scores": data["rouge_scores"],
                        "paper_id": data["paper_id"],
                        "target": data["target"],
                    }
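With the loading script `scitldr.py` removed and the Parquet files in place, the dataset would normally be loaded straight from the converted files. A minimal sketch, assuming the repository id is `scitldr` and using a config name from the deleted script's BUILDER_CONFIGS (Abstract, AIC, FullText):

```python
# Minimal sketch of loading the converted dataset without the deleted script.
from datasets import load_dataset

ds = load_dataset("scitldr", "AIC")  # repo id assumed; config name from BUILDER_CONFIGS
print(ds)                            # DatasetDict with train/validation/test splits
print(ds["train"][0]["paper_id"])
```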