Update aclue.py
aclue.py CHANGED
@@ -16,8 +16,7 @@ import os
 import datasets
 import pandas as pd
 
-_CITATION = ""
-"""
+_CITATION = "https://arxiv.org/abs/2310.09550"
 
 _DESCRIPTION = """\
 The Ancient Chinese Language Understanding Evaluation (ACLUE) is an evaluation benchmark focused on ancient Chinese language comprehension. It aims to assess the performance of large-scale language models on understanding ancient Chinese.
@@ -44,6 +43,7 @@ task_list = ['polysemy_resolution',
 class ACLUEConfig(datasets.BuilderConfig):
     def __init__(self, **kwargs):
         super().__init__(version=datasets.Version("1.0.0"), **kwargs)
+        self.subset = subset or "test"
         # V1.0.0 Init version
 
 
@@ -74,17 +74,12 @@ class ACLUE(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         data_dir = dl_manager.download_and_extract(_URL)
         task_name = self.config.name
+        subset = self.config.subset
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, f"test/{task_name}.csv"),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split("dev"),
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, f"dev/{task_name}.csv"),
+                    "filepath": os.path.join(data_dir, f"{subset}/{task_name}.csv"),
                 },
             ),
         ]
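Taken together, the commit fills in the citation URL, stores a subset attribute on ACLUEConfig, and collapses the separate test/dev split generators into a single TEST split that reads {subset}/{task_name}.csv. One caveat: as diffed, the added line self.subset = subset or "test" refers to a name subset that the unchanged def __init__(self, **kwargs) signature never binds, so instantiating the config would raise a NameError. A minimal sketch of how the config could accept the parameter (the keyword name and default shown here are assumptions, not part of the commit):

import datasets

class ACLUEConfig(datasets.BuilderConfig):
    # Sketch only: accept `subset` explicitly so the added assignment has a value to read.
    def __init__(self, subset=None, **kwargs):
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        # Fall back to the test files when no subset is requested.
        self.subset = subset or "test"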
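If the config accepts subset as sketched above, a caller could pick the dev files through load_dataset, since extra keyword arguments are forwarded to the builder config. The repo id and task name below are illustrative assumptions, not taken from the diff:

from datasets import load_dataset

# Repo id and task name are assumed for illustration; `subset` is passed
# through to ACLUEConfig.__init__ as a config kwarg. Recent versions of
# `datasets` require trust_remote_code=True to run a loading script.
dev = load_dataset("tyouisen/aclue", "polysemy_resolution",
                   subset="dev", trust_remote_code=True)

# Note: after this change the builder defines only a TEST split, so the dev
# files still come back under the "test" split key.
print(dev["test"][0])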