OfekGlick committed on
Commit 89dffdf
1 Parent(s): 81b300f

Delete DiscoEval.py

Files changed (1)
  1. DiscoEval.py +0 -257
DiscoEval.py DELETED
@@ -1,257 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import os
- import io
- import datasets
- import constants
- import pickle
- import logging
-
- _CITATION = """\
- @InProceedings{mchen-discoeval-19,
-     title = {Evaluation Benchmarks and Learning Criteria for Discourse-Aware Sentence Representations},
-     author = {Mingda Chen and Zewei Chu and Kevin Gimpel},
-     booktitle = {Proc. of {EMNLP}},
-     year = {2019}
- }
- """
-
- _DESCRIPTION = """\
- This dataset contains all tasks of the DiscoEval benchmark for sentence representation learning.
- """
-
- _HOMEPAGE = "https://github.com/ZeweiChu/DiscoEval"
-
-
- class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
-     """DiscoEval Benchmark"""
-     VERSION = datasets.Version("1.1.0")
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name=constants.SPARXIV,
-             version=VERSION,
-             description="Sentence positioning dataset from arXiv",
-         ),
-         datasets.BuilderConfig(
-             name=constants.SPROCSTORY,
-             version=VERSION,
-             description="Sentence positioning dataset from ROCStory",
-         ),
-         datasets.BuilderConfig(
-             name=constants.SPWIKI,
-             version=VERSION,
-             description="Sentence positioning dataset from Wikipedia",
-         ),
-         datasets.BuilderConfig(
-             name=constants.DCCHAT,
-             version=VERSION,
-             description="Discourse Coherence dataset from chat",
-         ),
-         datasets.BuilderConfig(
-             name=constants.DCWIKI,
-             version=VERSION,
-             description="Discourse Coherence dataset from Wikipedia",
-         ),
-         datasets.BuilderConfig(
-             name=constants.RST,
-             version=VERSION,
-             description="The RST Discourse Treebank dataset",
-         ),
-         datasets.BuilderConfig(
-             name=constants.PDTB_E,
-             version=VERSION,
-             description="The Penn Discourse Treebank - Explicit dataset.",
-         ),
-         datasets.BuilderConfig(
-             name=constants.PDTB_I,
-             version=VERSION,
-             description="The Penn Discourse Treebank - Implicit dataset.",
-         ),
-         datasets.BuilderConfig(
-             name=constants.SSPABS,
-             version=VERSION,
-             description="The SSP dataset.",
-         ),
-         datasets.BuilderConfig(
-             name=constants.BSOARXIV,
-             version=VERSION,
-             description="The BSO Task with the arxiv dataset.",
-         ),
-         datasets.BuilderConfig(
-             name=constants.BSOWIKI,
-             version=VERSION,
-             description="The BSO Task with the wiki dataset.",
-         ),
-         datasets.BuilderConfig(
-             name=constants.BSOROCSTORY,
-             version=VERSION,
-             description="The BSO Task with the rocstory dataset.",
-         ),
-     ]
-
-     def _info(self):
-         if self.config.name in [constants.SPARXIV, constants.SPROCSTORY, constants.SPWIKI]:
-             features_dict = {
-                 constants.TEXT_COLUMN_NAME[i]: datasets.Value('string')
-                 for i in range(constants.SP_TEXT_COLUMNS)
-             }
-             features_dict[constants.LABEL_NAME] = datasets.ClassLabel(names=constants.SP_LABELS)
-             features = datasets.Features(features_dict)
-
-         elif self.config.name in [constants.BSOARXIV, constants.BSOWIKI, constants.BSOROCSTORY]:
-             features_dict = {
-                 constants.TEXT_COLUMN_NAME[i]: datasets.Value('string')
-                 for i in range(constants.BSO_TEXT_COLUMNS)
-             }
-             features_dict[constants.LABEL_NAME] = datasets.ClassLabel(names=constants.BSO_LABELS)
-             features = datasets.Features(features_dict)
-
-         elif self.config.name in [constants.DCCHAT, constants.DCWIKI]:
-             features_dict = {
-                 constants.TEXT_COLUMN_NAME[i]: datasets.Value('string')
-                 for i in range(constants.DC_TEXT_COLUMNS)
-             }
-             features_dict[constants.LABEL_NAME] = datasets.ClassLabel(names=constants.DC_LABELS)
-             features = datasets.Features(features_dict)
-
-         elif self.config.name in [constants.RST]:
-             features_dict = {
-                 constants.TEXT_COLUMN_NAME[i]: [datasets.Value('string')]
-                 for i in range(constants.RST_TEXT_COLUMNS)
-             }
-             features_dict[constants.LABEL_NAME] = datasets.ClassLabel(names=constants.RST_LABELS)
-             features = datasets.Features(features_dict)
-
-         elif self.config.name in [constants.PDTB_E]:
-             features_dict = {
-                 constants.TEXT_COLUMN_NAME[i]: datasets.Value('string')
-                 for i in range(constants.PDTB_E_TEXT_COLUMNS)
-             }
-             features_dict[constants.LABEL_NAME] = datasets.ClassLabel(names=constants.PDTB_E_LABELS)
-             features = datasets.Features(features_dict)
-
-         elif self.config.name in [constants.PDTB_I]:
-             features_dict = {
-                 constants.TEXT_COLUMN_NAME[i]: datasets.Value('string')
-                 for i in range(constants.PDTB_I_TEXT_COLUMNS)
-             }
-             features_dict[constants.LABEL_NAME] = datasets.ClassLabel(names=constants.PDTB_I_LABELS)
-             features = datasets.Features(features_dict)
-
-         elif self.config.name in [constants.SSPABS]:
-             features_dict = {
-                 constants.TEXT_COLUMN_NAME[i]: datasets.Value('string')
-                 for i in range(constants.SSPABS_TEXT_COLUMNS)
-             }
-             features_dict[constants.LABEL_NAME] = datasets.ClassLabel(names=constants.SSPABS_LABELS)
-             features = datasets.Features(features_dict)
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         if self.config.name in [constants.SPARXIV, constants.SPROCSTORY, constants.SPWIKI]:
-             data_dir = constants.SP_DATA_DIR + "/" + constants.SP_DIRS[self.config.name]
-             train_name = constants.SP_TRAIN_NAME
-             valid_name = constants.SP_VALID_NAME
-             test_name = constants.SP_TEST_NAME
-
-         elif self.config.name in [constants.BSOARXIV, constants.BSOWIKI, constants.BSOROCSTORY]:
-             data_dir = constants.BSO_DATA_DIR + "/" + constants.BSO_DIRS[self.config.name]
-             train_name = constants.BSO_TRAIN_NAME
-             valid_name = constants.BSO_VALID_NAME
-             test_name = constants.BSO_TEST_NAME
-
-         elif self.config.name in [constants.DCCHAT, constants.DCWIKI]:
-             data_dir = constants.DC_DATA_DIR + "/" + constants.DC_DIRS[self.config.name]
-             train_name = constants.DC_TRAIN_NAME
-             valid_name = constants.DC_VALID_NAME
-             test_name = constants.DC_TEST_NAME
-
-         elif self.config.name in [constants.RST]:
-             data_dir = constants.RST_DATA_DIR
-             train_name = constants.RST_TRAIN_NAME
-             valid_name = constants.RST_VALID_NAME
-             test_name = constants.RST_TEST_NAME
-
-         elif self.config.name in [constants.PDTB_E, constants.PDTB_I]:
-             data_dir = os.path.join(constants.PDTB_DATA_DIR, constants.PDTB_DIRS[self.config.name])
-             train_name = constants.PDTB_TRAIN_NAME
-             valid_name = constants.PDTB_VALID_NAME
-             test_name = constants.PDTB_TEST_NAME
-
-         elif self.config.name in [constants.SSPABS]:
-             data_dir = constants.SSPABS_DATA_DIR
-             train_name = constants.SSPABS_TRAIN_NAME
-             valid_name = constants.SSPABS_VALID_NAME
-             test_name = constants.SSPABS_TEST_NAME
-
-         urls_to_download = {
-             "train": data_dir + "/" + train_name,
-             "valid": data_dir + "/" + valid_name,
-             "test": data_dir + "/" + test_name,
-         }
-         logger = logging.getLogger(__name__)
-         # A single download_and_extract call suffices; it returns the local paths.
-         downloaded_files = dl_manager.download_and_extract(urls_to_download)
-         logger.info("Downloaded files: %s", downloaded_files)
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "filepath": downloaded_files['train'],
-                     "split": "train",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "filepath": downloaded_files['valid'],
-                     "split": "dev",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "filepath": downloaded_files['test'],
-                     "split": "test",
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepath, split):
-         logger = logging.getLogger(__name__)
-         logger.info("Current working dir: %s", os.getcwd())
-         logger.info("Generating examples from = %s", filepath)
-         if self.config.name == constants.RST:
-             # RST examples are pickled records whose first element is the label
-             # and whose remaining elements are the text columns.
-             with open(filepath, "rb") as f:
-                 data = pickle.load(f)
-             for key, line in enumerate(data):
-                 example = {constants.TEXT_COLUMN_NAME[i]: sent for i, sent in enumerate(line[1:])}
-                 example[constants.LABEL_NAME] = line[0]
-                 yield key, example
-
-         else:
-             # All other tasks are tab-separated lines: label first, then the texts.
-             with io.open(filepath, mode='r', encoding='utf-8') as f:
-                 for key, line in enumerate(f):
-                     line = line.strip().split("\t")
-                     example = {constants.TEXT_COLUMN_NAME[i]: sent for i, sent in enumerate(line[1:])}
-                     example[constants.LABEL_NAME] = line[0]
-                     yield key, example
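
For reference, a loader script like this is consumed through datasets.load_dataset rather than imported directly. A minimal usage sketch follows; the repo id "OfekGlick/DiscoEval" is inferred from the committer name and "SParxiv" is a placeholder config string (the real config names live in the repo's constants module), so both are assumptions rather than values confirmed by this commit.

# Hypothetical usage sketch; repo id and config name are assumptions.
from datasets import load_dataset

# trust_remote_code may be required for script-based datasets in newer
# versions of the datasets library.
dataset = load_dataset("OfekGlick/DiscoEval", "SParxiv", trust_remote_code=True)
print(dataset["train"][0])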