Update climate-evaluation.py

climate-evaluation.py  CHANGED  (+46 -15)
@@ -25,11 +25,11 @@ Datasets for Climate Evaluation.
 
 _HOMEPAGE = "https://arxiv.org/abs/2401.09646"
 
-
+_URL = "https://huggingface.co/datasets/eci-io/climate-evaluation/resolve/main"
 
 _LICENSE = ""
 
-_BASE_HF_URL = "https://huggingface.co/datasets/eci-io/climate-evaluation/resolve/main"
+# _BASE_HF_URL = "https://huggingface.co/datasets/eci-io/climate-evaluation/resolve/main" #Path("./")
 
 # _BASE_HF_URL = Path("./")
 
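A note on the switch above: pathlib-style joining (the old _BASE_HF_URL / data_dir / ... pattern) appears to work only when the base is a local Path such as Path("./"), because wrapping a remote URL in Path() collapses the "//" after the scheme. The sketch below is illustrative only and not part of the commit; the base string is copied from the constants above, while the explicit "/" separator and the data_dir value "exams" are assumptions.

import os
from pathlib import Path

base = "https://huggingface.co/datasets/eci-io/climate-evaluation/resolve/main"
data_dir = "exams"  # illustrative directory name

# Old style: pathlib joining mangles the URL scheme (output shown for POSIX).
print(Path(base) / data_dir / "test.csv")
# https:/huggingface.co/datasets/eci-io/climate-evaluation/resolve/main/exams/test.csv

# New style: keep the base as a plain string and join only the relative part.
# os.path.join emits the platform separator, so a POSIX-style "/" is assumed here.
print(base + "/" + os.path.join(data_dir or "", "test.csv"))
# https://huggingface.co/datasets/eci-io/climate-evaluation/resolve/main/exams/test.csv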
@@ -353,8 +353,11 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
         # print(f"self.config.data_dir: {self.config.data_dir}")
 
         if self.config.name == "exams" or self.config.name == "translated_exams":
+            # urls_to_download={
+            #     "test": _BASE_HF_URL / data_dir / f"test.csv"
+            # }
             urls_to_download={
-                "test": _BASE_HF_URL / data_dir / f"test.csv"
+                "test": _URL + os.path.join(data_dir or "", "test.csv"),
             }
             downloaded_files = dl_manager.download_and_extract(urls_to_download)
             return [
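For context, every branch touched below follows the same three-step pattern: build a dict of remote URLs, hand it to the download manager, then wrap the returned local paths in split generators. A minimal sketch, assuming the usual datasets API; the helper name, the trailing slash on _URL, and the "filepath" gen_kwargs key are illustrative, and in the real script this logic sits inside _split_generators with data_dir taken from self.config.data_dir.

import os
import datasets

_URL = "https://huggingface.co/datasets/eci-io/climate-evaluation/resolve/main/"  # trailing slash assumed

def exams_test_split(dl_manager, data_dir=None):
    # 1) Build the remote URL(s) for the split.
    urls_to_download = {
        "test": _URL + os.path.join(data_dir or "", "test.csv"),
    }
    # 2) download_and_extract() returns the same dict shape with local cached paths.
    downloaded_files = dl_manager.download_and_extract(urls_to_download)
    # 3) Describe the split; _generate_examples() would receive these gen_kwargs.
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={"filepath": downloaded_files["test"]},
        ),
    ]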
@@ -368,10 +371,15 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
             ]
 
         if self.config.name == "exeter":
+            # urls_to_download={
+            #     "train": _BASE_HF_URL / data_dir / f"training.csv",
+            #     "valid": _BASE_HF_URL / data_dir / f"validation.csv",
+            #     "test": _BASE_HF_URL / data_dir / f"test.csv"
+            # }
             urls_to_download={
-                "train": _BASE_HF_URL / data_dir / f"training.csv",
-                "valid": _BASE_HF_URL / data_dir / f"validation.csv",
-                "test": _BASE_HF_URL / data_dir / f"test.csv"
+                "train": _URL + os.path.join(data_dir or "", "training.csv"),
+                "valid": _URL + os.path.join(data_dir or "", "validation.csv"),
+                "test": _URL + os.path.join(data_dir or "", "test.csv"),
             }
             downloaded_files = dl_manager.download_and_extract(urls_to_download)
             return [
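The data_dir or "" guard used throughout matters because self.config.data_dir can be None, and os.path.join(None, ...) raises TypeError. A small illustration (the directory name "exeter" is just a stand-in; output shown for POSIX):

import os

for data_dir in ("exeter", None):
    print(repr(os.path.join(data_dir or "", "training.csv")))
# 'exeter/training.csv'
# 'training.csv'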
@@ -411,11 +419,20 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                     "test-data/10-Ks (2018, test).tsv",
                 ],
             }
+            # os.path.join(data_dir or "", f)
+            # urls_to_download={
+            #     "train": [_BASE_HF_URL / data_dir / f for f in files["train"]],
+            #     "valid": [_BASE_HF_URL / data_dir / f for f in files["valid"]],
+            #     "test": [_BASE_HF_URL / data_dir / f for f in files["test"]],
+            # }
+
             urls_to_download={
-                "train": [_BASE_HF_URL / data_dir / f for f in files["train"]],
-                "valid": [_BASE_HF_URL / data_dir / f for f in files["valid"]],
-                "test": [_BASE_HF_URL / data_dir / f for f in files["test"]],
+                "train": [_URL + os.path.join(data_dir or "", f) for f in files["train"]],
+                "valid": [_URL + os.path.join(data_dir or "", f) for f in files["valid"]],
+                "test": [_URL + os.path.join(data_dir or "", f) for f in files["test"]],
             }
+
+
             downloaded_files = dl_manager.download_and_extract(urls_to_download)
             return [
                 datasets.SplitGenerator(
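This branch maps whole lists of files per split rather than single files; dl_manager.download_and_extract() accepts such nested dicts of lists and returns the same structure with local paths. A sketch of the URL-building step only, assuming a trailing slash on the base; apart from the one test filename visible in the hunk, the filenames are placeholders.

import os

_URL = "https://huggingface.co/datasets/eci-io/climate-evaluation/resolve/main/"  # trailing slash assumed
data_dir = None
files = {
    "train": ["train-data/10-Ks (train).tsv"],      # placeholder filename
    "valid": ["val-data/10-Ks (valid).tsv"],        # placeholder filename
    "test": ["test-data/10-Ks (2018, test).tsv"],   # filename from the hunk above
}

# Same comprehension pattern as the new code: one URL per file, per split.
urls_to_download = {
    split: [_URL + os.path.join(data_dir or "", f) for f in names]
    for split, names in files.items()
}
print(urls_to_download["test"])
# ['https://huggingface.co/datasets/eci-io/climate-evaluation/resolve/main/test-data/10-Ks (2018, test).tsv']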
@@ -451,11 +468,18 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
             # "corporations": "Corporations/Corporations Responses/Climate Change",
             # "combined": "Combined",
             # }
+            # urls_to_download={
+            #     "train": _BASE_HF_URL / data_dir / f"train_qa.csv",
+            #     "valid": _BASE_HF_URL / data_dir / f"val_qa.csv",
+            #     "test": _BASE_HF_URL / data_dir / f"test_qa.csv"
+            # }
+
             urls_to_download={
-                "train": _BASE_HF_URL / data_dir / f"train_qa.csv",
-                "valid": _BASE_HF_URL / data_dir / f"val_qa.csv",
-                "test": _BASE_HF_URL / data_dir / f"test_qa.csv"
+                "train": _URL + os.path.join(data_dir or "", "train_qa.csv"),
+                "valid": _URL + os.path.join(data_dir or "", "val_qa.csv"),
+                "test": _URL + os.path.join(data_dir or "", "test_qa.csv"),
             }
+
             downloaded_files = dl_manager.download_and_extract(urls_to_download)
 
             print(f"downloaded_files: {downloaded_files['train']}")
@@ -496,11 +520,18 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                 ),
             ]
 
+            # urls_to_download={
+            #     "train": _BASE_HF_URL / data_dir / f"train.csv", #os.path.join(data_dir or "", "train.csv"),
+            #     "valid": _BASE_HF_URL / data_dir / f"val.csv", #+ os.path.join(data_dir or "", "val.csv"),
+            #     "test": _BASE_HF_URL / data_dir / f"test.csv", #+ os.path.join(data_dir or "", "test.csv")
+            # }
+
             urls_to_download={
-                "train": _BASE_HF_URL / data_dir / f"train.csv",
-                "valid": _BASE_HF_URL / data_dir / f"val.csv",
-                "test": _BASE_HF_URL / data_dir / f"test.csv"
+                "train": _URL + os.path.join(data_dir or "", "train.csv"),
+                "valid": _URL + os.path.join(data_dir or "", "val.csv"),
+                "test": _URL + os.path.join(data_dir or "", "test.csv")
             }
+
             # print(f"urls_to_download['train']: {urls_to_download['train']}")
             # print(f"urls_to_download['valid']: {urls_to_download['valid']}")
             downloaded_files = dl_manager.download_and_extract(urls_to_download)
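End-to-end, the point of serving these files from _URL is that the dataset can be loaded remotely by config name; the names visible in this diff include "exams", "translated_exams" and "exeter". A hedged usage sketch (needs network access; recent versions of the datasets library also require opting in to run a loading script):

import datasets

ds = datasets.load_dataset(
    "eci-io/climate-evaluation",
    "exeter",
    trust_remote_code=True,  # assumed to be needed on datasets >= 2.16 for script-based datasets
)
print(ds)  # expected to expose the splits built from the train/valid/test URLs above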