Datasets: GEM

Sebastian Gehrmann committed
Commit b212506
1 Parent(s): 565dbb2
Files changed (2)
  1. dataset_infos.json +200 -0
  2. wiki_auto_asset_turk.py +219 -0
dataset_infos.json ADDED
@@ -0,0 +1,200 @@
{
    "wiki_auto_asset_turk": {
        "description": "GEM is a benchmark environment for Natural Language Generation with a focus on its Evaluation,\nboth through human annotations and automated Metrics.\n\nGEM aims to:\n- measure NLG progress across 13 datasets spanning many NLG tasks and languages.\n- provide an in-depth analysis of data and models presented via data statements and challenge sets.\n- develop standards for evaluation of generated text using both automated and human metrics.\n\nIt is our goal to regularly update GEM and to encourage toward more inclusive practices in dataset development\nby extending existing data or developing datasets for additional languages.\n",
        "citation": "@article{gem_benchmark,\n author = {Sebastian Gehrmann and\n Tosin P. Adewumi and\n Karmanya Aggarwal and\n Pawan Sasanka Ammanamanchi and\n Aremu Anuoluwapo and\n Antoine Bosselut and\n Khyathi Raghavi Chandu and\n Miruna{-}Adriana Clinciu and\n Dipanjan Das and\n Kaustubh D. Dhole and\n Wanyu Du and\n Esin Durmus and\n Ondrej Dusek and\n Chris Emezue and\n Varun Gangal and\n Cristina Garbacea and\n Tatsunori Hashimoto and\n Yufang Hou and\n Yacine Jernite and\n Harsh Jhamtani and\n Yangfeng Ji and\n Shailza Jolly and\n Dhruv Kumar and\n Faisal Ladhak and\n Aman Madaan and\n Mounica Maddela and\n Khyati Mahajan and\n Saad Mahamood and\n Bodhisattwa Prasad Majumder and\n Pedro Henrique Martins and\n Angelina McMillan{-}Major and\n Simon Mille and\n Emiel van Miltenburg and\n Moin Nadeem and\n Shashi Narayan and\n Vitaly Nikolaev and\n Rubungo Andre Niyongabo and\n Salomey Osei and\n Ankur P. Parikh and\n Laura Perez{-}Beltrachini and\n Niranjan Ramesh Rao and\n Vikas Raunak and\n Juan Diego Rodriguez and\n Sashank Santhanam and\n Joao Sedoc and\n Thibault Sellam and\n Samira Shaikh and\n Anastasia Shimorina and\n Marco Antonio Sobrevilla Cabezudo and\n Hendrik Strobelt and\n Nishant Subramani and\n Wei Xu and\n Diyi Yang and\n Akhila Yerukola and\n Jiawei Zhou},\n title = {The {GEM} Benchmark: Natural Language Generation, its Evaluation and\n Metrics},\n journal = {CoRR},\n volume = {abs/2102.01672},\n year = {2021},\n url = {https://arxiv.org/abs/2102.01672},\n archivePrefix = {arXiv},\n eprint = {2102.01672}\n}\n",
        "homepage": "https://gem-benchmark.github.io/",
        "license": "CC-BY-SA-4.0",
        "features": {
            "gem_id": {
                "dtype": "string",
                "id": null,
                "_type": "Value"
            },
            "gem_parent_id": {
                "dtype": "string",
                "id": null,
                "_type": "Value"
            },
            "source": {
                "dtype": "string",
                "id": null,
                "_type": "Value"
            },
            "target": {
                "dtype": "string",
                "id": null,
                "_type": "Value"
            },
            "references": [
                {
                    "dtype": "string",
                    "id": null,
                    "_type": "Value"
                }
            ]
        },
        "post_processed": null,
        "supervised_keys": null,
        "builder_name": "gem",
        "config_name": "wiki_auto_asset_turk",
        "version": {
            "version_str": "1.1.0",
            "description": null,
            "major": 1,
            "minor": 1,
            "patch": 0
        },
        "splits": {
            "train": {
                "name": "train",
                "num_bytes": 161096555,
                "num_examples": 483801,
                "dataset_name": "gem"
            },
            "validation": {
                "name": "validation",
                "num_bytes": 8211356,
                "num_examples": 20000,
                "dataset_name": "gem"
            },
            "test_asset": {
                "name": "test_asset",
                "num_bytes": 475360,
                "num_examples": 359,
                "dataset_name": "gem"
            },
            "test_turk": {
                "name": "test_turk",
                "num_bytes": 406866,
                "num_examples": 359,
                "dataset_name": "gem"
            },
            "challenge_train_sample": {
                "name": "challenge_train_sample",
                "num_bytes": 219566,
                "num_examples": 500,
                "dataset_name": "gem"
            },
            "challenge_validation_sample": {
                "name": "challenge_validation_sample",
                "num_bytes": 213072,
                "num_examples": 500,
                "dataset_name": "gem"
            },
            "challenge_test_asset_backtranslation": {
                "name": "challenge_test_asset_backtranslation",
                "num_bytes": 436844,
                "num_examples": 359,
                "dataset_name": "gem"
            },
            "challenge_test_asset_bfp02": {
                "name": "challenge_test_asset_bfp02",
                "num_bytes": 432766,
                "num_examples": 359,
                "dataset_name": "gem"
            },
            "challenge_test_asset_bfp05": {
                "name": "challenge_test_asset_bfp05",
                "num_bytes": 432766,
                "num_examples": 359,
                "dataset_name": "gem"
            },
            "challenge_test_asset_nopunc": {
                "name": "challenge_test_asset_nopunc",
                "num_bytes": 432759,
                "num_examples": 359,
                "dataset_name": "gem"
            },
            "challenge_test_turk_backtranslation": {
                "name": "challenge_test_turk_backtranslation",
                "num_bytes": 417228,
                "num_examples": 359,
                "dataset_name": "gem"
            },
            "challenge_test_turk_bfp02": {
                "name": "challenge_test_turk_bfp02",
                "num_bytes": 414405,
                "num_examples": 359,
                "dataset_name": "gem"
            },
            "challenge_test_turk_bfp05": {
                "name": "challenge_test_turk_bfp05",
                "num_bytes": 414407,
                "num_examples": 359,
                "dataset_name": "gem"
            },
            "challenge_test_turk_nopunc": {
                "name": "challenge_test_turk_nopunc",
                "num_bytes": 414412,
                "num_examples": 359,
                "dataset_name": "gem"
            }
        },
        "download_checksums": {
            "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/GEM2021/full_with_split/train.tsv": {
                "num_bytes": 120678315,
                "checksum": "0ed9ea351922ba39a9a2a5a15293619af5f2a94b9ead86b7ef2007bfcb76aadd"
            },
            "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/GEM2021/full_with_split/valid.tsv": {
                "num_bytes": 4338364,
                "checksum": "6be79b5d014a27facc0f3e892cef35774f48f6e08e4d6eefafb801bcf2ab7b09"
            },
            "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_turk_detokenized.json": {
                "num_bytes": 452091,
                "checksum": "5a1c82b5b0ca1891efc2d1465045f4866a8794e6322bc7386b5501aaac41ac57"
            },
            "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/wiki_auto_asset_turk_train_valid.zip": {
                "num_bytes": 1061032,
                "checksum": "3dc8e070c8afabde606366bf49fa81b0b62f95933035cc9ea0381d149948f52d"
            },
            "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.orig": {
                "num_bytes": 43745,
                "checksum": "673ceb2672a37168a52040d75e16f9ffd1e3777b9f68e19207f2adf6542723f1"
            },
            "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.0": {
                "num_bytes": 35457,
                "checksum": "66f36029d0c732eb92886021faefe531c6cfd0a32bdbe7ae4aa97fd45bd1b046"
            },
            "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.1": {
                "num_bytes": 34096,
                "checksum": "d323ceb364abbe84c79b14b028aa1ff563cd94955fbab19049612548dbb0f83f"
            },
            "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.2": {
                "num_bytes": 34348,
                "checksum": "786b55f8425ce4a993e98be5e2bea9ef87bf536b96dc13f7a57c4733fdb63e06"
            },
            "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.3": {
                "num_bytes": 37292,
                "checksum": "e211c9e2ede1dfe315097132dbe4feda76b309bdc636a5394cb5d2664ba5bf52"
            },
            "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.4": {
                "num_bytes": 35887,
                "checksum": "37be9cf0592c0f68d87848dc9c442fe62f344518c1993896c00788bf943b755d"
            },
            "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.5": {
                "num_bytes": 35351,
                "checksum": "8485210573a3bd76116de8e978b227677c6c207111a4938729397c4e603dfa46"
            },
            "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.6": {
                "num_bytes": 35846,
                "checksum": "f0cb3ab823d23203ea044f81bd7e67cc823db0632095e43b78a54a9891a0b0a8"
            },
            "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.7": {
                "num_bytes": 34560,
                "checksum": "35cbb8b9964252a1470607634f19ad946c6bc2951b3e500eedd826baf12bd3c8"
            },
            "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.8": {
                "num_bytes": 35830,
                "checksum": "047b6419590b88f93b435d3177bba1883dc9c0dc178676e48470b408236446f4"
            },
            "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.9": {
                "num_bytes": 35313,
                "checksum": "3f5745e4f2743563b88ea4284ec35fa4ddb68d62de80b63ffb87751b998fe6b8"
            }
        },
        "download_size": 126927527,
        "post_processing_size": null,
        "dataset_size": 174018362,
        "size_in_bytes": 300945889
    }
}
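
The split sizes and checksums above are produced by the loading script below. As a rough usage sketch (not part of this commit; it assumes the config is published under the "gem" dataset on the Hugging Face Hub and a datasets release that still resolves loading scripts):

from datasets import load_dataset

# "wiki_auto_asset_turk" is the config_name recorded above;
# "test_asset" is one of the two test splits.
asset = load_dataset("gem", "wiki_auto_asset_turk", split="test_asset")

print(len(asset))              # 359, matching num_examples above
print(asset[0]["references"])  # the ten ASSET reference simplifications
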
wiki_auto_asset_turk.py ADDED
@@ -0,0 +1,219 @@
import csv
import json
import os
import datasets

_CITATION = """\
@inproceedings{jiang-etal-2020-neural,
    title = "Neural {CRF} Model for Sentence Alignment in Text Simplification",
    author = "Jiang, Chao and
      Maddela, Mounica and
      Lan, Wuwei and
      Zhong, Yang and
      Xu, Wei",
    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.acl-main.709",
    doi = "10.18653/v1/2020.acl-main.709",
    pages = "7943--7960",
}
"""

_DESCRIPTION = """\
WikiAuto provides a set of aligned sentences from English Wikipedia and Simple
English Wikipedia as a resource to train sentence simplification systems.

The authors first crowd-sourced a set of manual alignments between sentences in
a subset of the Simple English Wikipedia and their corresponding versions in
English Wikipedia (this corresponds to the manual config in this version of the
dataset), then trained a neural CRF system to predict these alignments.

The trained alignment prediction model was then applied to the other articles in
Simple English Wikipedia with an English counterpart to create a larger corpus
of aligned sentences (corresponding to the auto and auto_acl configs here).
"""

_URLs = {
    "train": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/GEM2021/full_with_split/train.tsv",
    "validation": "https://github.com/chaojiang06/wiki-auto/raw/master/wiki-auto/GEM2021/full_with_split/valid.tsv",
    "test_turk": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_turk_detokenized.json",
    "challenge_set": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_challenge_sets/wiki_auto_asset_turk_train_valid.zip",
}

# Add Asset files.
_URLs[
    "test_asset_orig"
] = "https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.orig"
for i in range(10):
    _URLs[
        f"test_asset_{i}"
    ] = f"https://github.com/facebookresearch/asset/raw/master/dataset/asset.test.simp.{i}"


class E2ENlg(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "wiki_auto_asset_turk"

    def _info(self):
        features = datasets.Features(
            {
                "gem_id": datasets.Value("string"),
                "gem_parent_id": datasets.Value("string"),
                "source": datasets.Value("string"),
                "target": datasets.Value("string"),
                "references": [datasets.Value("string")],
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=datasets.info.SupervisedKeysData(
                input="source", output="target"
            ),
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_dir = dl_manager.download_and_extract(_URLs)
        challenge_sets = [
            (
                "challenge_train_sample",
                "train_wiki_auto_asset_turk_RandomSample500.json",
            ),
            (
                "challenge_validation_sample",
                "validation_wiki_auto_asset_turk_RandomSample500.json",
            ),
            (
                "challenge_test_asset_backtranslation",
                "test_asset_wiki_auto_asset_turk_BackTranslation.json",
            ),
            (
                "challenge_test_asset_bfp02",
                "test_asset_wiki_auto_asset_turk_ButterFingersPerturbation_p=0.02.json",
            ),
            (
                "challenge_test_asset_bfp05",
                "test_asset_wiki_auto_asset_turk_ButterFingersPerturbation_p=0.05.json",
            ),
            (
                "challenge_test_asset_nopunc",
                "test_asset_wiki_auto_asset_turk_WithoutPunctuation.json",
            ),
            (
                "challenge_test_turk_backtranslation",
                "detok_test_turk_wiki_auto_asset_turk_BackTranslation.json",
            ),
            (
                "challenge_test_turk_bfp02",
                "detok_test_turk_wiki_auto_asset_turk_ButterFingersPerturbation_p=0.02.json",
            ),
            (
                "challenge_test_turk_bfp05",
                "detok_test_turk_wiki_auto_asset_turk_ButterFingersPerturbation_p=0.05.json",
            ),
            (
                "challenge_test_turk_nopunc",
                "detok_test_turk_wiki_auto_asset_turk_WithoutPunctuation.json",
            ),
        ]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": dl_dir["train"],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": dl_dir["validation"],
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name="test_asset",
                gen_kwargs={
                    "filepath": "",
                    "split": "test_asset",
                    "filepaths": [dl_dir["test_asset_orig"]]
                    + [dl_dir[f"test_asset_{i}"] for i in range(10)],
                },
            ),
            datasets.SplitGenerator(
                name="test_turk",
                gen_kwargs={
                    "filepath": dl_dir["test_turk"],
                    "split": "test_turk",
                },
            ),
        ] + [
            datasets.SplitGenerator(
                name=challenge_split,
                gen_kwargs={
                    "filepath": os.path.join(
                        dl_dir["challenge_set"], "wiki_auto_asset_turk", filename
                    ),
                    "split": challenge_split,
                },
            )
            for challenge_split, filename in challenge_sets
        ]

    def _generate_examples(self, filepath, split, filepaths=None, lang=None):
        """Yields examples."""
        if split in ["train", "validation"]:
            keys = [
                "source",
                "target",
            ]
            with open(filepath, encoding="utf-8") as f:
                for id_, line in enumerate(f):
                    values = line.strip().split("\t")
                    assert (
                        len(values) == 2
                    ), f"Not enough fields in ---- {line} --- {values}"
                    example = dict([(k, val) for k, val in zip(keys, values)])
                    example["gem_id"] = f"wiki_auto_asset_turk-{split}-{id_}"
                    example["gem_parent_id"] = example["gem_id"]
                    example["references"] = (
                        [] if split == "train" else [example["target"]]
                    )
                    yield id_, example
        elif split == "test_turk":
            examples = json.load(open(filepath, encoding="utf-8"))
            for id_, example in enumerate(examples):
                example["gem_parent_id"] = example["gem_id"]
                for k in ["source_id", "target_id"]:
                    if k in example:
                        del example[k]
                yield id_, example
        elif split == "test_asset":
            files = [open(f_name, encoding="utf-8") for f_name in filepaths]
            for id_, lines in enumerate(zip(*files)):
                yield id_, {
                    "gem_id": f"wiki_auto_asset_turk-{split}-{id_}",
                    "gem_parent_id": f"wiki_auto_asset_turk-{split}-{id_}",
                    "target": lines[1].strip(),
                    "source": lines[0].strip(),
                    "references": [line.strip() for line in lines[1:]],
                }
        else:
            exples = json.load(open(filepath, encoding="utf-8"))
            if isinstance(exples, dict):
                assert len(exples) == 1, "multiple entries found"
                exples = list(exples.values())[0]
            for id_, exple in enumerate(exples):
                exple["gem_parent_id"] = exple["gem_id"]
                exple["gem_id"] = f"wiki_auto_asset_turk-{split}-{id_}"
                for k in ["source_id", "target_id"]:
                    if k in exple:
                        del exple[k]
                yield id_, exple
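
Before the config is published, the script can also be exercised directly from a local checkout. A minimal sketch, assuming a datasets version that accepts a path to a loading script (the relative path is illustrative):

from datasets import load_dataset

# Build only the validation split from the script above.
ds = load_dataset("./wiki_auto_asset_turk.py", split="validation")

print(ds.features)      # gem_id, gem_parent_id, source, target, references
print(ds[0]["gem_id"])  # "wiki_auto_asset_turk-validation-0"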