Datasets: kamel-usp/aes_enem_dataset
Tasks: Text Classification
Modalities: Text
Formats: parquet
Languages: Portuguese
Size: 1K - 10K
License:
modify config for sourceA
aes_enem_dataset.py  CHANGED  (+17 -24)

@@ -47,7 +47,7 @@ _HOMEPAGE = ""
 _LICENSE = ""

 _URLS = {
-    "…
+    "sourceAOnly": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceA.tar.gz?download=true",
     "sourceB": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceB.tar.gz?download=true",
 }

@@ -85,7 +85,7 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):

     # You will be able to load one or the other configurations in the following list with
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="…
+        datasets.BuilderConfig(name="sourceAOnly", version=VERSION, description="TODO"),
         datasets.BuilderConfig(
             name="sourceB",
             version=VERSION,

@@ -162,17 +162,16 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
         urls = _URLS[self.config.name]
         extracted_files = dl_manager.download_and_extract({self.config.name: urls})
         html_parser = self._process_html_files(extracted_files)
-        if self.config.name…
+        if "sourceA" in self.config.name:
             self._post_process_dataframe(html_parser.sourceA)
             self._generate_splits(html_parser.sourceA)
+            folder_sourceA = "/".join((html_parser.sourceA).split("/")[:-1])
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     # These kwargs will be passed to _generate_examples
                     gen_kwargs={
-                        "filepath": os.path.join(
-                            extracted_files["sourceA"], "sourceA", "train.csv"
-                        ),
+                        "filepath": os.path.join(folder_sourceA, "train.csv"),
                         "split": "train",
                     },
                 ),

@@ -180,18 +179,14 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
                     name=datasets.Split.VALIDATION,
                     # These kwargs will be passed to _generate_examples
                     gen_kwargs={
-                        "filepath": os.path.join(
-                            extracted_files["sourceA"], "sourceA", "validation.csv"
-                        ),
+                        "filepath": os.path.join(folder_sourceA, "validation.csv"),
                         "split": "validation",
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "filepath": os.path.join(
-                            extracted_files["sourceA"], "sourceA", "test.csv"
-                        ),
+                        "filepath": os.path.join(folder_sourceA, "test.csv"),
                         "split": "test",
                     },
                 ),

@@ -202,9 +197,7 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
                 datasets.SplitGenerator(
                     name="full",
                     gen_kwargs={
-                        "filepath": os.path.join(
-                            extracted_files["sourceB"], "sourceB", "sourceB.csv"
-                        ),
+                        "filepath": html_parser.sourceB,
                         "split": "full",
                     },
                 ),

@@ -269,6 +262,7 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
         assert (
             len(set(val_df["id_prompt"]).intersection(set(test_df["id_prompt"]))) == 0
         ), "Overlap between val and test id_prompt"
+        # TODO if self.config.name == sourceAWithGrader
         dirname = os.path.dirname(filepath)
         train_df.to_csv(f"{dirname}/train.csv", index=False)
         val_df.to_csv(f"{dirname}/validation.csv", index=False)

@@ -504,18 +498,17 @@ class HTMLParser:
         for key, filepath in self.paths_dict.items():
             if key != config_name:
                 continue  # TODO improve later, we will only support a single config at a time
-            …
-            …
-                self.sourceA = f"{full_path}/sourceA.csv"
+            if "sourceA" in config_name:
+                self.sourceA = f"{filepath}/sourceA/sourceA.csv"
             elif config_name == "sourceB":
-                self.sourceB = f"{…
-            …
-            …
-            ) as final_file:
+                self.sourceB = f"{filepath}/sourceB/sourceB.csv"
+            file = self.sourceA if self.sourceA else self.sourceB
+            file_dir = "/".join((file).split("/")[:-1])
+            with open(file, "w", newline="", encoding="utf8") as final_file:
                 writer = csv.writer(final_file)
                 writer.writerow(CSV_HEADER)
                 sub_folders = [
-                    name for name in os.listdir(…
+                    name for name in os.listdir(file_dir) if not name.endswith(".csv")
                 ]
                 essay_id = 0
                 essay_title = None

@@ -531,7 +524,7 @@ class HTMLParser:
             ):
                 if prompt_folder in PROMPTS_TO_IGNORE:
                     continue
-                prompt = os.path.join(…
+                prompt = os.path.join(file_dir, prompt_folder)
                 prompt_essays = [name for name in os.listdir(prompt)]
                 essay_year = self._get_essay_year(
                     self.apply_soup(prompt, "Prompt.html")
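
For reference, a minimal loading sketch under the configuration names introduced above. The repository id and the config names ("sourceAOnly", "sourceB") are taken from the diff; the split layout follows the SplitGenerator definitions, and depending on the installed version of the datasets library a script-based loader like this may also require trust_remote_code=True.

from datasets import load_dataset

# "sourceAOnly" is the config renamed in this commit; per the diff it builds
# train/validation/test splits from the extracted sourceA CSV files.
source_a = load_dataset("kamel-usp/aes_enem_dataset", "sourceAOnly")

# "sourceB" exposes a single "full" split read from sourceB.csv.
source_b = load_dataset("kamel-usp/aes_enem_dataset", "sourceB", split="full")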
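The directory-derivation idiom that appears twice in the new code, "/".join(path.split("/")[:-1]), strips the last path component; on forward-slash paths it behaves like os.path.dirname, as this illustrative snippet shows (the example path is made up, not taken from the dataset):

import os

csv_path = "extracted/sourceA/sourceA.csv"    # illustrative path only
folder = "/".join(csv_path.split("/")[:-1])   # -> "extracted/sourceA"
assert folder == os.path.dirname(csv_path)    # equivalent for forward-slash paths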