Upload 2 files
DiscoEval.py: CHANGED (+25, -51)
@@ -18,7 +18,6 @@ import datasets
 import constants
 import pickle
 import logging
-from huggingface_hub import snapshot_download, hf_hub_url, hf_hub_download
 
 _CITATION = """\
 @InProceedings{mchen-discoeval-19,
@@ -35,15 +34,7 @@ This dataset contains all tasks of the DiscoEval benchmark for sentence represen
 
 _HOMEPAGE = "https://github.com/ZeweiChu/DiscoEval"
 
-# TODO: Add link to the official dataset URLs here
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLS = {
-    "DiscoEval": "https://huggingface.co/.zip",
-}
 
-
-# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
 class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
     """DiscoEval Benchmark"""
     VERSION = datasets.Version("1.1.0")
@@ -93,13 +84,24 @@ class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
             version=VERSION,
             description="The SSP dataset.",
         ),
+        datasets.BuilderConfig(
+            name=constants.BSOARXIV,
+            version=VERSION,
+            description="The BSO Task with the arxiv dataset.",
+        ),
+        datasets.BuilderConfig(
+            name=constants.BSOWIKI,
+            version=VERSION,
+            description="The BSO Task with the wiki dataset.",
+        ),
+        datasets.BuilderConfig(
+            name=constants.BSOROCSTORY,
+            version=VERSION,
+            description="The BSO Task with the rocstory dataset.",
+        ),
     ]
 
-    DEFAULT_CONFIG_NAME = constants.SPARXIV  # It's not mandatory to have a default configuration. Just use one if it make sense.
-
     def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-
         if self.config.name in [constants.SPARXIV, constants.SPROCSTORY, constants.SPWIKI]:
             features_dict = {
                 constants.TEXT_COLUMN_NAME[i]: datasets.Value('string')
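The hunk above registers three new configurations for the BSO task (binary sentence ordering) over the arxiv, wiki, and rocstory corpora. Once merged, they load like any other config; a minimal sketch, assuming a hypothetical repo id and assuming the string behind constants.BSOARXIV follows the existing naming pattern (both live outside this diff):

from datasets import load_dataset

# "user/DiscoEval" and "BSOarxiv" are hypothetical stand-ins for the real
# repo id and for whatever string constants.BSOARXIV resolves to.
bso = load_dataset("user/DiscoEval", "BSOarxiv")
print(bso["train"][0])  # text columns plus a ClassLabel-encoded label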
@@ -108,6 +110,14 @@ class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
             features_dict[constants.LABEL_NAME] = datasets.ClassLabel(names=constants.SP_LABELS)
             features = datasets.Features(features_dict)
 
+        elif self.config.name in [constants.BSOARXIV, constants.BSOWIKI, constants.BSOROCSTORY]:
+            features_dict = {
+                constants.TEXT_COLUMN_NAME[i]: datasets.Value('string')
+                for i in range(constants.BSO_TEXT_COLUMNS + 1)
+            }
+            features_dict[constants.LABEL_NAME] = datasets.ClassLabel(names=constants.BSO_LABELS)
+            features = datasets.Features(features_dict)
+
         elif self.config.name in [constants.DCCHAT, constants.DCWIKI]:
             features_dict = {
                 constants.TEXT_COLUMN_NAME[i]: datasets.Value('string')
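The new _info branch mirrors the SP branch: one string column per sentence plus a ClassLabel. For illustration, the schema it builds is equivalent to the sketch below, with hypothetical values standing in for the constants (constants.py is not part of this diff):

import datasets

# Hypothetical stand-ins for names defined in constants.py.
TEXT_COLUMN_NAME = ["sentence_1", "sentence_2"]
BSO_TEXT_COLUMNS = 1           # range(BSO_TEXT_COLUMNS + 1) -> 2 columns
LABEL_NAME = "label"
BSO_LABELS = ["0", "1"]

features_dict = {
    TEXT_COLUMN_NAME[i]: datasets.Value("string")
    for i in range(BSO_TEXT_COLUMNS + 1)
}
features_dict[LABEL_NAME] = datasets.ClassLabel(names=BSO_LABELS)
features = datasets.Features(features_dict)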
@@ -148,43 +158,14 @@ class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
             features_dict[constants.LABEL_NAME] = datasets.ClassLabel(names=constants.SSPABS_LABELS)
             features = datasets.Features(features_dict)
 
-        else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
-            features = datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "option2": datasets.Value("string"),
-                    "second_domain_answer": datasets.Value("string")
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
         return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
-
-            features=features,  # Here we define them above because they are different between the two configurations
-            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-            # supervised_keys=("sentence", "label"),
-            # Homepage of the dataset for documentation
+            features=features,
             homepage=_HOMEPAGE,
-            # Citation for the dataset
             citation=_CITATION,
         )
 
     def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-
-        # urls = _URLS[self.config.name]
-        # data_dir = dl_manager.download_and_extract(urls)
-        data_dir = ''
-        train_name = ''
-        valid_name = ''
-        test_name = ''
        if self.config.name in [constants.SPARXIV, constants.SPROCSTORY, constants.SPWIKI]:
             data_dir = constants.SP_DATA_DIR + "/" + constants.SP_DIRS[self.config.name]
             train_name = constants.SP_TRAIN_NAME
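The template comments deleted here described the mechanism that presumably still feeds `downloaded_files` further down: `dl_manager.download_and_extract` accepts any nested dict/list of URLs and returns the same structure with the URLs replaced by local cached paths. A sketch of that pattern as a hypothetical helper, with the path variables mirroring the per-config assignments above:

# Hypothetical helper illustrating the standard download-manager pattern;
# data_dir / *_name correspond to the variables set per config above.
def make_split_files(dl_manager, data_dir, train_name, valid_name, test_name):
    urls = {
        "train": data_dir + "/" + train_name,
        "valid": data_dir + "/" + valid_name,
        "test": data_dir + "/" + test_name,
    }
    # Returns the same dict shape with URLs replaced by local paths.
    return dl_manager.download_and_extract(urls)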
@@ -235,7 +216,6 @@ class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "filepath": downloaded_files['train'],
                     "split": "train",
@@ -243,7 +223,6 @@ class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "filepath": downloaded_files['valid'],
                     "split": "dev",
@@ -251,7 +230,6 @@ class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "filepath": downloaded_files['test'],
                     "split": "test"
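The comments deleted in the three hunks above all stated the same contract: each SplitGenerator's gen_kwargs dict is unpacked as keyword arguments of _generate_examples. A minimal self-contained sketch of that behavior, with invented values:

# What the datasets library does internally with gen_kwargs.
gen_kwargs = {"filepath": "/tmp/train.txt", "split": "train"}  # illustrative

def _generate_examples(filepath, split):
    print(f"reading {filepath} for split {split}")

_generate_examples(**gen_kwargs)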
@@ -259,15 +237,11 @@ class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
             ),
         ]
 
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, filepath, split):
         logger = logging.getLogger(__name__)
         logger.info(f"Current working dir: {os.getcwd()}")
         logger.info("generating examples from = %s", filepath)
-
-        print(f"Current working dir: {os.listdir(os.getcwd())}")
-
-        if self.config.name in [constants.RST]:
+        if self.config.name == constants.RST:
             data = pickle.load(open(filepath, "rb"))
             for key, line in enumerate(data):
                 example = {constants.TEXT_COLUMN_NAME[i]: sent for i, sent in enumerate(line[1:])}
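In the RST branch above, the `line[1:]` slice suggests each pickled row stores the label in position 0 and the sentences after it; that layout is an inference, since the data files are not in this diff. A self-contained sketch of the round trip with invented row data (note that a `with` block, unlike the script's `pickle.load(open(...))`, also closes the file handle):

import pickle

# Invented row layout: label first, then the text fields (an assumption
# based on the `line[1:]` slice in _generate_examples).
rows = [["Elaboration", "Sentence one.", "Sentence two."]]
with open("rst.pkl", "wb") as f:  # hypothetical file name
    pickle.dump(rows, f)

with open("rst.pkl", "rb") as f:
    data = pickle.load(f)
for key, line in enumerate(data):
    example = {f"sentence_{i + 1}": sent for i, sent in enumerate(line[1:])}
    example["label"] = line[0]
    print(key, example)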