Uploaded two files

Files changed:
- DiscoEval.py (+32 -66)
- constants.py (+12 -0)
DiscoEval.py  CHANGED  (+32 -66)

@@ -35,7 +35,6 @@ This dataset contains all tasks of the DiscoEval benchmark for sentence represen
 
 _HOMEPAGE = "https://github.com/ZeweiChu/DiscoEval"
 
-
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
@@ -180,48 +179,26 @@ class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
 
-
         # urls = _URLS[self.config.name]
         # data_dir = dl_manager.download_and_extract(urls)
-
+        data_dir = ''
+        train_name = ''
+        valid_name = ''
+        test_name = ''
         if self.config.name in [constants.SPARXIV, constants.SPROCSTORY, constants.SPWIKI]:
-            # subfolder = os.path.join(constants.SP_DATA_DIR, constants.SP_DIRS[self.config.name])
             data_dir = constants.SP_DATA_DIR + "/" + constants.SP_DIRS[self.config.name]
-            test_dowload = snapshot_download(
-                repo_id="OfekGlick/DiscoEval",
-                repo_type="dataset",
-                local_dir='./',
-                ignore_patterns=["*.py", "*.gitignore", "*.gitattributes", "*.DS_Store", "*.md"],
-            )
-            # train_url = hf_hub_download(
-            #     repo_id="OfekGlick/DiscoEval",
-            #     filename=constants.SP_TRAIN_NAME,
-            #     subfolder=subfolder,
-            #     repo_type="dataset",
-            #     local_dir='./',
-            # )
-            #
-            # valid_url = hf_hub_download(
-            #     repo_id="OfekGlick/DiscoEval",
-            #     filename=constants.SP_VALID_NAME,
-            #     subfolder=subfolder,
-            #     repo_type="dataset",
-            #     local_dir='./',
-            # )
-            # text_url = hf_hub_download(
-            #     repo_id="OfekGlick/DiscoEval",
-            #     filename=constants.SP_TEST_NAME,
-            #     subfolder=subfolder,
-            #     repo_type="dataset",
-            #     local_dir='./',
-            # )
-            # data_dir = dl_manager.download_and_extract(urls)
             train_name = constants.SP_TRAIN_NAME
             valid_name = constants.SP_VALID_NAME
             test_name = constants.SP_TEST_NAME
 
+        elif self.config.name in [constants.BSOARXIV, constants.BSOWIKI, constants.BSOROCSTORY]:
+            data_dir = constants.BSO_DATA_DIR + "/" + constants.BSO_DIRS[self.config.name]
+            train_name = constants.BSO_TRAIN_NAME
+            valid_name = constants.BSO_VALID_NAME
+            test_name = constants.BSO_TEST_NAME
+
         elif self.config.name in [constants.DCCHAT, constants.DCWIKI]:
-            data_dir =
+            data_dir = constants.DC_DATA_DIR + "/" + constants.DC_DIRS[self.config.name]
             train_name = constants.DC_TRAIN_NAME
             valid_name = constants.DC_VALID_NAME
             test_name = constants.DC_TEST_NAME
@@ -244,12 +221,23 @@ class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
             valid_name = constants.SSPABS_VALID_NAME
             test_name = constants.SSPABS_TEST_NAME
 
+        urls_to_download = {
+            "train": data_dir + "/" + train_name,
+            "valid": data_dir + "/" + valid_name,
+            "test": data_dir + "/" + test_name,
+        }
+        logger = logging.getLogger(__name__)
+        data_dirs = dl_manager.download_and_extract(urls_to_download)
+        logger.info(f"Data directories: {data_dirs}")
+        downloaded_files = dl_manager.download_and_extract(data_dirs)
+        logger.info(f"Downloading Completed")
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath":
+                    "filepath": downloaded_files['train'],
                     "split": "train",
                 },
             ),
@@ -257,7 +245,7 @@ class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath":
+                    "filepath": downloaded_files['valid'],
                     "split": "dev",
                 },
             ),
@@ -265,53 +253,31 @@ class DiscoEvalSentence(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath":
+                    "filepath": downloaded_files['test'],
                     "split": "test"
                 },
             ),
         ]
 
-
-
-
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, filepath, split):
-        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         logger = logging.getLogger(__name__)
         logger.info(f"Current working dir: {os.getcwd()}")
         logger.info("generating examples from = %s", filepath)
         print(f"Current working dir: {os.getcwd()}")
         print(f"Current working dir: {os.listdir(os.getcwd())}")
 
-
-        if self.config.name in [constants.SPARXIV, constants.SPROCSTORY, constants.SPWIKI,
-                                constants.DCWIKI, constants.DCCHAT,
-                                constants.PDTB_E, constants.PDTB_I,
-                                constants.SSPABS]:
-            with io.open(filepath, mode='r', encoding='utf-8') as f:
-                for key, line in enumerate(f):
-                    line = line.strip().split("\t")
-                    example = {constants.TEXT_COLUMN_NAME[i]: sent for i, sent in enumerate(line[1:])}
-                    example[constants.LABEL_NAME] = line[0]
-                    yield key, example
-
-        elif self.config.name in [constants.RST]:
+        if self.config.name in [constants.RST]:
             data = pickle.load(open(filepath, "rb"))
             for key, line in enumerate(data):
                 example = {constants.TEXT_COLUMN_NAME[i]: sent for i, sent in enumerate(line[1:])}
                 example[constants.LABEL_NAME] = line[0]
                 yield key, example
 
-        # TODO: implement other datasets
         else:
-
-
-
-
-
-
-
-if __name__ == '__main__':
-    temp = os.path.join(constants.SP_DATA_DIR, constants.SP_DIRS[constants.SPARXIV])
-    ofek = 5
+            with io.open(filepath, mode='r', encoding='utf-8') as f:
+                for key, line in enumerate(f):
+                    line = line.strip().split("\t")
+                    example = {constants.TEXT_COLUMN_NAME[i]: sent for i, sent in enumerate(line[1:])}
+                    example[constants.LABEL_NAME] = line[0]
+                    yield key, example
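Taken together, this commit swaps the earlier `snapshot_download` experiment (and the commented-out `hf_hub_download` calls) for the idiomatic pattern: build per-split paths relative to the repo, hand them to `dl_manager.download_and_extract`, and feed the resulting local paths into the `"filepath"` gen_kwargs, which had been left empty. One apparent leftover: `download_and_extract` runs twice, once on `urls_to_download` and again on its own output; since the split files are plain text, the second call looks redundant and could likely be dropped in a follow-up.

With the splits wired up, the loader can be smoke-tested end to end. A minimal sketch, not part of the commit: the config string 'SParxiv' is an assumption inferred from the naming pattern in constants.py (e.g. DCCHAT = 'DCchat'), and trust_remote_code only applies on `datasets` versions that gate script-based loaders:

    import datasets

    # Load one DiscoEval task through the updated script (hedged sketch).
    # 'SParxiv' is a hypothetical config name; check constants.py for the
    # exact strings exposed as builder configs.
    ds = datasets.load_dataset(
        "OfekGlick/DiscoEval",
        "SParxiv",
        trust_remote_code=True,  # needed on newer `datasets` for script datasets
    )
    print(ds)              # DatasetDict with train / validation / test splits
    print(ds["train"][0])  # label plus the sentence columns from constants.TEXT_COLUMN_NAME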
constants.py  CHANGED  (+12 -0)

@@ -62,6 +62,18 @@ SP_DIRS = {SPARXIV: 'arxiv/', SPROCSTORY: 'rocstory/', SPWIKI: 'wiki/'}
 SP_LABELS = ["0", "1", "2", "3", "4"]
 SP_TEXT_COLUMNS = 5
 
+# BSO Constants:
+BSOARXIV = 'BSOarxiv'
+BSOROCSTORY = 'BSOrocstory'
+BSOWIKI = 'BSOwiki'
+BSO_TRAIN_NAME = 'train.txt'
+BSO_VALID_NAME = 'valid.txt'
+BSO_TEST_NAME = 'test.txt'
+BSO_DATA_DIR = 'data/BSO'
+BSO_DIRS = {BSOARXIV: 'arxiv/', BSOROCSTORY: 'rocstory/', BSOWIKI: 'wiki/'}
+BSO_LABELS = ["0", "1"]
+BSO_TEXT_COLUMNS = 2
+
 # DC Constants:
 DCCHAT = 'DCchat'
 DCWIKI = 'DCwiki'
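The new BSO (Binary Sentence Ordering) block mirrors the existing SP constants: three source corpora, binary labels, and two text columns per example. Given BSO_TEXT_COLUMNS = 2 and BSO_LABELS = ["0", "1"], each data line should be a label followed by two tab-separated sentences, which the generic else: branch of _generate_examples parses. A sketch of that parsing on a made-up line; the column names are hypothetical stand-ins for constants.TEXT_COLUMN_NAME:

    # Illustrative only: the parsing logic below is copied from the dataset
    # script; the sample line and the column names are assumptions.
    TEXT_COLUMN_NAME = ["sentence_1", "sentence_2"]  # hypothetical names
    LABEL_NAME = "label"

    line = "1\tShe opened the door.\tThen she stepped inside.\n"
    fields = line.strip().split("\t")
    example = {TEXT_COLUMN_NAME[i]: sent for i, sent in enumerate(fields[1:])}
    example[LABEL_NAME] = fields[0]
    print(example)
    # {'sentence_1': 'She opened the door.',
    #  'sentence_2': 'Then she stepped inside.', 'label': '1'}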