yuvalkirstain committed
Commit a5a81e7 · 1 Parent(s): 777a3f8
arxiv works
Browse files

- citations_and_descriptions.py +0 -56
- configs/arxiv.py +0 -37
- configs/fs.py +0 -66
- configs/scrolls.py +0 -30
- configs/super_glue.py +0 -48
- data/arxiv_debug.zip +0 -3
- data/summ_screen_fd_debug.zip +0 -3
- debug.py +5 -11
- fs.py +85 -65
- normalize_raw_data/normalize_scrolls.py +0 -26
citations_and_descriptions.py
DELETED

```diff
@@ -1,56 +0,0 @@
-_FS_CITATION = """
-TBD
-"""
-
-_FS_DESCRIPTION = """
-TBD
-"""
-
-_SUMM_SCREEN_DESCRIPTION = """
-SummScreenFD (Chen et al., 2021) is a summarization dataset in the domain of TV shows (e.g. Friends, Game of Thrones).
-Given a transcript of a specific episode, the goal is to produce the episode's recap.
-The original dataset is divided into two complementary subsets, based on the source of its community contributed transcripts.
-For SCROLLS, we use the ForeverDreaming (FD) subset, as it incorporates 88 different shows,
-making it a more diverse alternative to the TV MegaSite (TMS) subset, which has only 10 shows.
-Community-authored recaps for the ForeverDreaming transcripts were collected from English Wikipedia and TVMaze."""
-
-_GOV_REPORT_DESCRIPTION = """
-GovReport (Huang et al., 2021) is a summarization dataset of reports addressing various national policy issues published by the
-Congressional Research Service and the U.S. Government Accountability Office, where each document is paired with a hand-written executive summary.
-The reports and their summaries are longer than their equivalents in other popular long-document summarization datasets;
-for example, GovReport's documents are approximately 1.5 and 2.5 times longer than the documents in Arxiv and PubMed, respectively."""
-
-_ARXIV_DESCRIPTION = """
-"""
-
-_SUMM_SCREEN_CITATION = r"""
-@misc{chen2021summscreen,
-    title={SummScreen: A Dataset for Abstractive Screenplay Summarization},
-    author={Mingda Chen and Zewei Chu and Sam Wiseman and Kevin Gimpel},
-    year={2021},
-    eprint={2104.07091},
-    archivePrefix={arXiv},
-    primaryClass={cs.CL}
-}"""
-
-_GOV_REPORT_CITATION = r"""
-@inproceedings{huang-etal-2021-efficient,
-    title = "Efficient Attentions for Long Document Summarization",
-    author = "Huang, Luyang and
-      Cao, Shuyang and
-      Parulian, Nikolaus and
-      Ji, Heng and
-      Wang, Lu",
-    booktitle = "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
-    month = jun,
-    year = "2021",
-    address = "Online",
-    publisher = "Association for Computational Linguistics",
-    url = "https://aclanthology.org/2021.naacl-main.112",
-    doi = "10.18653/v1/2021.naacl-main.112",
-    pages = "1419--1436",
-    abstract = "The quadratic computational and memory complexities of large Transformers have limited their scalability for long document summarization. In this paper, we propose Hepos, a novel efficient encoder-decoder attention with head-wise positional strides to effectively pinpoint salient information from the source. We further conduct a systematic study of existing efficient self-attentions. Combined with Hepos, we are able to process ten times more tokens than existing models that use full attentions. For evaluation, we present a new dataset, GovReport, with significantly longer documents and summaries. Results show that our models produce significantly higher ROUGE scores than competitive comparisons, including new state-of-the-art results on PubMed. Human evaluation also shows that our models generate more informative summaries with fewer unfaithful errors.",
-}"""
-
-_ARXIV_CITATION = r"""
-}"""
```
configs/arxiv.py
DELETED

```diff
@@ -1,37 +0,0 @@
-from typing import NoReturn
-
-from configs.fs import FSConfig
-
-
-class ArxivConfig(FSConfig):
-    def __init__(self, **kwargs):
-        super().__init__(**kwargs)
-
-    @property
-    def id_key(self) -> str:
-        return "article_id"
-
-    @property
-    def source_key(self) -> str:
-        return "article_text"
-
-    @property
-    def target_key(self) -> str:
-        return "abstract_text"
-
-    @property
-    def train_file(self) -> str:
-        return "train.txt"
-
-    @property
-    def validation_file(self) -> str:
-        return "val.txt"
-
-    @property
-    def test_file(self) -> str:
-        return "test.txt"
-
-    def process(self, example) -> NoReturn:
-        example[self.source_key] = " ".join(example[self.source_key])
-        example[self.target_key] = " ".join(example[self.target_key]).replace("<S>", "").replace("</S>", "")
-        del example["labels"]
```
configs/fs.py
DELETED

```diff
@@ -1,66 +0,0 @@
-from abc import abstractmethod
-from typing import Optional, NoReturn, Union
-
-import datasets
-
-
-class FSConfig(datasets.BuilderConfig):
-    """BuilderConfig for FS."""
-
-    def __init__(self, additional_features, data_url, citation, url, **kwargs):
-        """BuilderConfig for FS.
-        Args:
-          additional_features: `list[string]`, list of the features that will appear in the feature dict
-            additionally to the self.id_key, self.source_key and self.target_key. Should not include "label".
-          data_url: `string`, url to download the zip file from.
-          citation: `string`, citation for the data set.
-          url: `string`, url for information about the data set.
-          label_classes: `list[string]`, the list of classes for the label if the
-            label is present as a string. Non-string labels will be cast to either
-            'False' or 'True'.
-          **kwargs: keyword arguments forwarded to super.
-        """
-        super(FSConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
-        self.features = [self.id_key, self.source_key, self.target_key] + additional_features
-        if self.question_key:
-            self.features += [self.question_key]
-        self.data_url = data_url
-        self.citation = citation
-        self.url = url
-
-    @property
-    @abstractmethod
-    def id_key(self) -> str:
-        pass
-
-    @property
-    @abstractmethod
-    def train_file(self) -> str:
-        pass
-
-    @property
-    @abstractmethod
-    def validation_file(self) -> str:
-        pass
-
-    @property
-    @abstractmethod
-    def test_file(self) -> str:
-        pass
-
-    @property
-    @abstractmethod
-    def source_key(self) -> str:
-        pass
-
-    @property
-    def question_key(self) -> Union[str, None]:
-        return None
-
-    @property
-    @abstractmethod
-    def target_key(self) -> str:
-        pass
-
-    def process(self, example) -> NoReturn:
-        pass
```
configs/scrolls.py
DELETED

```diff
@@ -1,30 +0,0 @@
-from configs.fs import FSConfig
-
-
-class ScrollsConfig(FSConfig):
-    def __init__(self, **kwargs):
-        super().__init__(**kwargs)
-
-    @property
-    def source_key(self) -> str:
-        return "input"
-
-    @property
-    def target_key(self) -> str:
-        return "output"
-
-    @property
-    def train_file(self) -> str:
-        return "train.jsonl"
-
-    @property
-    def validation_file(self) -> str:
-        return "validation.jsonl"
-
-    @property
-    def test_file(self) -> str:
-        return "test.jsonl"
-
-    @property
-    def id_key(self) -> str:
-        return "pid"
```
configs/super_glue.py
DELETED

```diff
@@ -1,48 +0,0 @@
-from typing import Optional, Union
-
-from configs.fs import FSConfig
-
-
-class SuperGLUEConfig(FSConfig):
-    @property
-    def id_key(self) -> str:
-        return "idx"
-
-    @property
-    def target_key(self) -> str:
-        return "label"
-
-    @property
-    def train_file(self) -> str:
-        return "train.jsonl"
-
-    @property
-    def validation_file(self) -> str:
-        return "val.jsonl"
-
-    @property
-    def test_file(self) -> str:
-        return "test.jsonl"
-
-
-class BoolQConfig(SuperGLUEConfig):
-
-    @property
-    def source_key(self) -> str:
-        return "passage"
-
-    @property
-    def question_key(self) -> Union[str, None]:
-        return "question"
-
-
-class RTEConfig(SuperGLUEConfig):
-
-    # TODO HACK - we treat premise == source, hypothesis == question
-    @property
-    def source_key(self) -> str:
-        return "premise"
-
-    @property
-    def question_key(self) -> Union[str, None]:
-        return "hypothesis"
```
data/arxiv_debug.zip
DELETED

```diff
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:51575dfb34c29cc1646f444cce45f1e47f36839682c9e6c78a68fc53e40ce915
-size 954416
```
data/summ_screen_fd_debug.zip
DELETED

```diff
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:735c0ee602901d0e6a548d104812fd60733a904d264d4a36d2a494920de747c3
-size 685706
```
debug.py
CHANGED

```diff
@@ -1,13 +1,7 @@
-
-from datasets import load_dataset
-
-def main():
-    # dataset = load_dataset("tau/fs",name="summ_screen_fd", max_source_length=512, tokenizer=tokenizer, prompt="Summary:")
-    ssfd_debug = load_dataset("/Users/yuvalkirstain/repos/fs", name="summ_screen_fd")
-    x = 5
-    # arxiv_debug = load_dataset("/Users/yuvalkirstain/repos/fs", name="arxiv_debug", max_source_length=512,
-    #                            tokenizer=tokenizer, prompt="Summarize the above:")
-
+import datasets
 
 if __name__ == '__main__':
-    main()
+    dataset = datasets.load_dataset("fs.py", 'arxiv', streaming=True, split="validation")
+    it = iter(dataset)
+    a = next(it)
+    x = 5
```
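The rewritten debug.py exercises the builder in streaming mode: with `streaming=True`, `load_dataset` returns an `IterableDataset` that decodes rows lazily instead of downloading and caching the whole split. A minimal sketch of the same smoke test, using the standard `itertools.islice` pattern to peek at a few rows (the `article_id` and `abstract` field names come from the new `arxiv` config in fs.py below; that the hosted JSONL files actually carry those fields is an assumption):

```python
import itertools

import datasets

# Stream the validation split of the local fs.py builder's "arxiv" config;
# rows are decoded from the JSONL files one at a time as we iterate.
dataset = datasets.load_dataset("fs.py", "arxiv", streaming=True, split="validation")

# Peek at the first three rows without materializing the whole split.
for row in itertools.islice(dataset, 3):
    print(row["article_id"], row["abstract"][:80])
```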
fs.py
CHANGED

```diff
@@ -1,74 +1,94 @@
+# coding=utf-8
+# Lint as: python3
+"""The SCROLLS benchmark."""
+
 import json
 import os
+from abc import abstractmethod
 
 import datasets
-
-
-
-
-
-)
-
-
-
-
-
-
-
+
+
+class FewsionConfig(datasets.BuilderConfig):
+    """BuilderConfig for SCROLLS."""
+
+    def __init__(self, data_url, **kwargs):
+        """BuilderConfig for SCROLLS.
+        Args:
+          features: `list[string]`, list of the features that will appear in the
+            feature dict. Should not include "label".
+          data_url: `string`, url to download the zip file from.
+          citation: `string`, citation for the data set.
+          url: `string`, url for information about the data set.
+          label_classes: `list[string]`, the list of classes for the label if the
+            label is present as a string. Non-string labels will be cast to either
+            'False' or 'True'.
+          **kwargs: keyword arguments forwarded to super.
+        """
+        super(FewsionConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
+        self.data_url = data_url
+        self.features = [self.source_column_name, self.target_column_name, self.id_column_name]
+        if self.question_column_name:
+            self.features.append(self.question_column_name)
+
+    @property
+    @abstractmethod
+    def source_column_name(self) -> str:
+        pass
+
+    @property
+    @abstractmethod
+    def target_column_name(self) -> str:
+        pass
+
+    @property
+    @abstractmethod
+    def question_column_name(self) -> str:
+        pass
+
+    @property
+    @abstractmethod
+    def id_column_name(self) -> str:
+        pass
+
+
+class ArxivConfig(FewsionConfig):
+
+    @property
+    def source_column_name(self) -> str:
+        return "article"
+
+    @property
+    def target_column_name(self) -> str:
+        return "abstract"
+
+    @property
+    def question_column_name(self) -> str:
+        pass
+
+    @property
+    def id_column_name(self) -> str:
+        return "article_id"
+
+
+class Fewsion(datasets.GeneratorBasedBuilder):
+
     DEFAULT_WRITER_BATCH_SIZE = 1000  # because Narrative QA is a rather large dataset
     BUILDER_CONFIGS = [
-        # word level
-        BoolQConfig(
-            additional_features=[],
-            name="boolq",
-            description="",  # TODO
-            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/BoolQ.zip",
-            citation="",  # TODO
-            url=""  # TODO
-        ),
-        RTEConfig(
-            additional_features=[],
-            name="rte",
-            description="",  # TODO
-            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/RTE.zip",
-            citation="",  # TODO
-            url=""
-        ),
-        # paragraph level
-        ScrollsConfig(
-            additional_features=["id"],
-            name="summ_screen_fd_debug",
-            description=_SUMM_SCREEN_DESCRIPTION,
-            data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/summ_screen_fd_debug.zip",
-            citation=_SUMM_SCREEN_CITATION,
-            url="https://github.com/mingdachen/SummScreen"
-        ),
-        ScrollsConfig(
-            additional_features=["id"],
-            name="gov_report",
-            description=_GOV_REPORT_CITATION,
-            data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/gov_report.zip",
-            citation=_GOV_REPORT_DESCRIPTION,
-            url="https://gov-report-data.github.io/"
-        ),
         ArxivConfig(
-
-
-
-            data_url="https://huggingface.co/datasets/tau/fs/resolve/main/data/arxiv_debug.zip",
-            citation=_ARXIV_DESCRIPTION,
-            url="https://github.com/armancohan/long-summarization"
-        ),
+            name="arxiv",
+            data_url="https://fewsion.s3.us-east-2.amazonaws.com/arxiv.zip",
+        )
     ]
 
     def _info(self):
        features = {feature: datasets.Value("string") for feature in self.config.features}
 
         return datasets.DatasetInfo(
-            description=
+            description="",
             features=datasets.Features(features),
-            homepage=
-            citation=
+            homepage="",
+            citation="",
         )
 
     def _split_generators(self, dl_manager):
@@ -85,31 +105,31 @@ class FS(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "data_file": os.path.join(dl_dir,
+                    "data_file": os.path.join(dl_dir, "train.jsonl"),
+                    "split": datasets.Split.TRAIN,
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "data_file": os.path.join(dl_dir,
+                    "data_file": os.path.join(dl_dir, "val.jsonl"),
+                    "split": datasets.Split.VALIDATION,
                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "data_file": os.path.join(dl_dir,
+                    "data_file": os.path.join(dl_dir, "test.jsonl") if data_files is None else data_files["test"],
+                    "split": datasets.Split.TEST,
                 },
             ),
         ]
 
-    def _generate_examples(self, data_file):
+    def _generate_examples(self, data_file, split):
         with open(data_file, encoding="utf-8") as f:
             for line in f:
                 row = json.loads(line)
-                self.config.
-                if self.config.target_key not in row:
-                    row[self.config.target_key] = None
-                yield row[self.config.id_key], row
+                yield row[self.config.id_column_name], row
 
 
 def _get_task_name_from_data_url(data_url):
```
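With this refactor, supporting another dataset in fs.py amounts to subclassing `FewsionConfig` and naming the columns its JSONL files expose. A sketch of what a second config might look like, mirroring `ArxivConfig` (the `GovReportConfig` name, the field names, and the registration line are hypothetical illustrations, not part of this commit):

```python
class GovReportConfig(FewsionConfig):
    """Hypothetical sketch: map the builder's generic column roles onto the
    field names of a gov_report-style JSONL file (all names assumed)."""

    @property
    def source_column_name(self) -> str:
        return "report"  # assumed name of the input-document field

    @property
    def target_column_name(self) -> str:
        return "summary"  # assumed name of the reference-summary field

    @property
    def question_column_name(self) -> str:
        pass  # summarization task: no question column, as in ArxivConfig

    @property
    def id_column_name(self) -> str:
        return "id"  # assumed name of the unique-id field


# It would then be listed next to ArxivConfig, e.g.:
# BUILDER_CONFIGS = [ArxivConfig(...), GovReportConfig(name="gov_report", data_url="...")]
```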
normalize_raw_data/normalize_scrolls.py
DELETED

```diff
@@ -1,26 +0,0 @@
-import os
-import shutil
-from fire import Fire
-from datasets import load_dataset
-from icecream import ic
-
-def normalize_example(example):
-    return {"source": example["input"], "target": example["output"]}
-
-
-def main(dataset_name, num_proc=5, data_dir="../data/"):
-    dataset = load_dataset("tau/scrolls", dataset_name)
-    dataset = dataset.map(normalize_example, num_proc=num_proc, remove_columns=["input", "output"])
-    # ic(dataset_name, dataset["train"][0])
-    dir_name = os.path.join(data_dir, dataset_name)
-    os.makedirs(dir_name, exist_ok=True)
-    for split in dataset:
-        dataset[split].to_json(os.path.join(dir_name, f"{split}.jsonl"))
-    shutil.make_archive(base_name=dir_name,
-                        format='zip',
-                        root_dir=dir_name)
-    shutil.rmtree(dir_name)
-
-
-if __name__ == '__main__':
-    Fire(main)
```
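Since the deleted script exposed `main` through `fire.Fire`, each SCROLLS task was presumably normalized from the command line before the zips were uploaded. A sketch of an equivalent direct call, assuming the script is still present (the `gov_report` argument is a guess, suggested by the gov_report.zip URL removed from fs.py above):

```python
# Equivalent to running: python normalize_raw_data/normalize_scrolls.py gov_report
# (import path only valid before this commit, which deletes the script)
from normalize_raw_data.normalize_scrolls import main

# Downloads tau/scrolls "gov_report", renames input/output to source/target,
# writes one {split}.jsonl per split, and zips the folder into ../data/gov_report.zip.
main(dataset_name="gov_report", num_proc=5, data_dir="../data/")
```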