abarbosa committed
Commit 173a0de · Parent(s): c0a4a51

include sourceB dataset

Files changed (1)
  1. aes_enem_dataset.py +274 -229
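A minimal loading sketch of what this commit enables, for context. Assumptions not stated in the diff: the script is published as kamel-usp/aes_enem_dataset, the split names follow the code below, and depending on your datasets version loading a script-based dataset may additionally require trust_remote_code=True.

from datasets import load_dataset

# Existing configuration: sourceA keeps its train/validation/test splits.
source_a = load_dataset("kamel-usp/aes_enem_dataset", "sourceA")

# New configuration added by this commit: sourceB is exposed as a single "full" split.
source_b = load_dataset("kamel-usp/aes_enem_dataset", "sourceB", split="full")

# Each example carries the fields declared in _info(): id, id_prompt, essay_title,
# essay_text, grades (a sequence of int16) and essay_year.
print(source_b[0]["essay_title"], source_b[0]["grades"])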
aes_enem_dataset.py CHANGED
@@ -48,14 +48,24 @@ _LICENSE = ""
 
 _URLS = {
     "sourceA": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceA.tar.gz?download=true",
 }
 
-
 PROMPTS_TO_IGNORE = [
     "brasileiros-tem-pessima-educacao-argumentativa-segundo-cientista",
     "carta-convite-discutir-discriminacao-na-escola",
     "informacao-no-rotulo-de-produtos-transgenicos",
 ]
 CSV_HEADER = [
     "id",
     "id_prompt",
@@ -73,17 +83,7 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
 
     VERSION = datasets.Version("0.0.1")
 
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
     # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(name="sourceA", version=VERSION, description="TODO"),
         datasets.BuilderConfig(
@@ -93,23 +93,18 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
         ),
     ]
 
-    DEFAULT_CONFIG_NAME = "sourceA"  # It's not mandatory to have a default configuration. Just use one if it make sense.
-
     def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-        if (
-            self.config.name == "sourceA"
-        ):  # This is the name of the configuration selected in BUILDER_CONFIGS above
-            features = datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "id_prompt": datasets.Value("string"),
-                    "essay_title": datasets.Value("string"),
-                    "essay_text": datasets.Value("string"),
-                    "grades": datasets.Sequence(datasets.Value("int16")),
-                    "essay_year": datasets.Value("int16"),
-                }
-            )
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
@@ -126,53 +121,7 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
             citation=_CITATION,
         )
 
-    def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        urls = _URLS[self.config.name]
-        extracted_files = dl_manager.download_and_extract({"sourceA": urls})
-        html_parser = self._process_html_files(extracted_files)
-        self._generate_splits(html_parser.sourceA)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(
-                        extracted_files["sourceA"], "sourceA", "train.csv"
-                    ),
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(
-                        extracted_files["sourceA"], "sourceA", "validation.csv"
-                    ),
-                    "split": "validation",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(
-                        extracted_files["sourceA"], "sourceA", "test.csv"
-                    ),
-                    "split": "test",
-                },
-            ),
-        ]
-
-    def _process_html_files(self, paths_dict):
-        html_parser = HTMLParser(paths_dict)
-        html_parser.parse()
-        return html_parser
-
-    def _generate_splits(self, filepath: str, train_size=0.7):
         def map_year(year):
             if year <= 2017:
                 return "<=2017"
@@ -184,7 +133,8 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
 
             # We will remove the rows that match the criteria below
             if any(
-                single_grade in grades[:-1] #we ignore the sum, and only check the concetps
                 for single_grade in ["50", "100", "150", "0.5", "1.0", "1.5"]
             ):
                 return None
@@ -193,7 +143,6 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
                 int(grade_mapping.get(grade_concept, grade_concept))
                 for grade_concept in grades[:-1]
             ]
-
             # Calculate and append the sum of the mapped grades as the last element
             mapped_grades.append(sum(mapped_grades))
             return mapped_grades
@@ -203,9 +152,73 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
         df["essay_year"] = df["essay_year"].astype("int")
         df["mapped_year"] = df["essay_year"].apply(map_year)
         df["grades"] = df["grades"].apply(normalize_grades)
-        df = df.dropna()
-        buckets = df.groupby("mapped_year")["id_prompt"].unique().to_dict()
-        df.drop('mapped_year', axis=1, inplace=True)
         train_set = []
         val_set = []
         test_set = []
@@ -263,20 +276,19 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
 
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, filepath, split):
-        if self.config.name == "sourceA":
-            with open(filepath, encoding="utf-8") as csvfile:
-                next(csvfile)
-                csv_reader = csv.DictReader(csvfile, fieldnames=CSV_HEADER)
-                for i, row in enumerate(csv_reader):
-                    grades = row["grades"].strip("[]").split(", ")
-                    yield i, {
-                        "id": row["id"],
-                        "id_prompt": row["id_prompt"],
-                        "essay_title": row["title"],
-                        "essay_text": row["essay"],
-                        "grades": grades,
-                        "essay_year": row["essay_year"],
-                    }
 
 
 class HTMLParser:
@@ -292,148 +304,186 @@ class HTMLParser:
         soup = BeautifulSoup(conteudo, "html.parser")
         return soup
 
-    @staticmethod
-    def _get_title(soup):
-        title = soup.find("div", class_="container-composition")
-        if title is None:
-            title = soup.find("h1", class_="pg-color10").get_text()
-        else:
-            title = title.h2.get_text()
-        title = title.replace("\xa0", "")
-        return title
-
-    @staticmethod
-    def _get_grades(soup):
-        grades = soup.find("section", class_="results-table")
-        final_grades = []
-        if grades is not None:
-            grades = grades.find_all("span", class_="points")
-            assert len(grades) == 6, f"Missing grades: {len(grades)}"
-            for single_grade in grades:
-                grade = int(single_grade.get_text())
-                final_grades.append(grade)
-            assert final_grades[-1] == sum(
-                final_grades[:-1]
-            ), "Grading sum is not making sense"
-        else:
-            grades = soup.find("div", class_="redacoes-corrigidas pg-bordercolor7")
-            grades_sum = float(
-                soup.find("th", class_="noBorder-left").get_text().replace(",", ".")
-            )
-            grades = grades.find_all("td")[:10]
-            for idx in range(1, 10, 2):
-                grade = float(grades[idx].get_text().replace(",", "."))
-                final_grades.append(grade)
-            assert grades_sum == sum(final_grades), "Grading sum is not making sense"
-            final_grades.append(grades_sum)
-        return final_grades
-
-    @staticmethod
-    def _get_general_comment(soup):
-        def get_general_comment_aux(soup):
-            result = soup.find("article", class_="list-item c")
-            if result is not None:
-                result = result.find("div", class_="description")
-                return result.get_text()
             else:
-                result = soup.find("p", style="margin: 0px 0px 11px;")
                 if result is not None:
                     return result.get_text()
                 else:
-                    result = soup.find("p", style="margin: 0px;")
                     if result is not None:
                         return result.get_text()
                     else:
-                        result = soup.find(
-                            "p", style="margin: 0px; text-align: justify;"
-                        )
                         if result is not None:
                             return result.get_text()
                         else:
-                            return ""
-
-        text = soup.find("div", class_="text")
-        if text is not None:
-            text = text.find("p")
-            if (text is None) or (len(text.get_text()) < 2):
                 return get_general_comment_aux(soup)
-            return text.get_text()
-        else:
-            return get_general_comment_aux(soup)
 
-    @staticmethod
-    def _get_specific_comment(soup):
-        result = soup.find("div", class_="text")
-        if result is not None:
-            result = result.find_all("li")
             cms = []
-            if result != []:
-                for item in result:
-                    text = item.get_text()
-                    if text != "\xa0":
-                        cms.append(text)
-                return cms
             else:
-                result = soup.find("div", class_="text").find_all("p")
                 for item in result:
                     text = item.get_text()
                     if text != "\xa0":
                         cms.append(text)
-                return cms
-        else:
-            result = soup.find_all("article", class_="list-item c")
-            if len(result) < 2:
-                return ["First if"]
-            result = result[1].find_all("p")
-            cms = []
-            for item in result:
-                text = item.get_text()
-                if text != "\xa0":
-                    cms.append(text)
-            return cms
-
-    @staticmethod
-    def _get_essay(soup):
-        essay = soup.find("div", class_="text-composition")
-        if essay is not None:
-            essay = essay.find_all("p")
-            for f in essay:
-                while f.find("span", style="color:#00b050") is not None:
-                    f.find("span", style="color:#00b050").decompose()
-                while f.find("span", class_="certo") is not None:
-                    f.find("span", class_="certo").decompose()
             result = []
-            for paragraph in essay:
-                result.append(paragraph.get_text())
-            return result
-        else:
-            essay = soup.find("div", {"id": "texto"})
-            essay.find("section", class_="list-items").decompose()
-            essay = essay.find_all("p")
-            for f in essay:
-                while f.find("span", class_="certo") is not None:
-                    f.find("span", class_="certo").decompose()
-            result = []
-            for paragraph in essay:
-                result.append(paragraph.get_text())
             return result
 
-    @staticmethod
-    def _get_essay_year(soup):
-        pattern = r"redações corrigidas - \w+/\d+"
-        first_occurrence = re.search(pattern, soup.get_text().lower())
-        matched_url = first_occurrence.group(0) if first_occurrence else None
-        year_pattern = r"\d{4}"
-        return re.search(year_pattern, matched_url).group(0)
 
     def _clean_title(self, title):
-        smaller_index = title.find("[")
-        if smaller_index == -1:
             return title
-        else:
-            bigger_index = title.find("]")
-            new_title = title[:smaller_index] + title[bigger_index + 1 :]
-            return self._clean_title(new_title.replace(" ", " "))
 
     def _clean_list(self, list):
         if list == []:
@@ -450,11 +500,15 @@ class HTMLParser:
                     new_list.append(phrase)
             return new_list
 
-    def parse(self):
         for key, filepath in self.paths_dict.items():
             full_path = os.path.join(filepath, key)
-            if key == "sourceA":
                 self.sourceA = f"{full_path}/sourceA.csv"
             with open(
                 f"{full_path}/{key}.csv", "w", newline="", encoding="utf8"
             ) as final_file:
@@ -479,29 +533,20 @@ class HTMLParser:
                         continue
                 prompt = os.path.join(full_path, prompt_folder)
                 prompt_essays = [name for name in os.listdir(prompt)]
-                essay_year = HTMLParser._get_essay_year(
                     self.apply_soup(prompt, "Prompt.html")
                 )
                 for essay in prompt_essays:
                     soup_text = self.apply_soup(prompt, essay)
                     if essay == "Prompt.html":
                         continue
-                    essay_title = self._clean_title(
-                        HTMLParser._get_title(soup_text).replace(";", ",")
                     )
-                    essay_grades = HTMLParser._get_grades(soup_text)
-                    general_comment = HTMLParser._get_general_comment(
-                        soup_text
-                    ).strip()
-                    specific_comment = HTMLParser._get_specific_comment(soup_text)
-                    if general_comment in specific_comment:
-                        specific_comment.remove(general_comment)
-                    if (len(specific_comment) > 1) and (
-                        len(specific_comment[0]) < 2
-                    ):
-                        specific_comment = specific_comment[1:]
-                    essay_text = self._clean_list(HTMLParser._get_essay(soup_text))
-                    specific_comment = self._clean_list(specific_comment)
                     writer.writerow(
                         [
                             essay,
 
 
 _URLS = {
     "sourceA": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceA.tar.gz?download=true",
+    "sourceB": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceB.tar.gz?download=true",
 }
 
 PROMPTS_TO_IGNORE = [
     "brasileiros-tem-pessima-educacao-argumentativa-segundo-cientista",
     "carta-convite-discutir-discriminacao-na-escola",
     "informacao-no-rotulo-de-produtos-transgenicos",
 ]
+
+# Essays to Ignore
+ESSAY_TO_IGNORE = [
+    "direitos-em-conflito-liberdade-de-expressao-e-intimidade/2.html",
+    "terceirizacao-avanco-ou-retrocesso/2.html",
+    "artes-e-educacao-fisica-opcionais-ou-obrigatorias/2.html",
+    "violencia-e-drogas-o-papel-do-usuario/0.html",
+    "internacao-compulsoria-de-dependentes-de-crack/0.html",
+]
+
 CSV_HEADER = [
     "id",
     "id_prompt",

 
     VERSION = datasets.Version("0.0.1")
 
     # You will be able to load one or the other configurations in the following list with
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(name="sourceA", version=VERSION, description="TODO"),
         datasets.BuilderConfig(

         ),
     ]
 
     def _info(self):
+        features = datasets.Features(
+            {
+                "id": datasets.Value("string"),
+                "id_prompt": datasets.Value("string"),
+                "essay_title": datasets.Value("string"),
+                "essay_text": datasets.Value("string"),
+                "grades": datasets.Sequence(datasets.Value("int16")),
+                "essay_year": datasets.Value("int16"),
+            }
+        )
+
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,

             citation=_CITATION,
         )
 
+    def _post_process_dataframe(self, filepath):
         def map_year(year):
             if year <= 2017:
                 return "<=2017"

 
             # We will remove the rows that match the criteria below
             if any(
+                single_grade
+                in grades[:-1]  # we ignore the sum, and only check the concepts
                 for single_grade in ["50", "100", "150", "0.5", "1.0", "1.5"]
             ):
                 return None

                 int(grade_mapping.get(grade_concept, grade_concept))
                 for grade_concept in grades[:-1]
             ]
             # Calculate and append the sum of the mapped grades as the last element
             mapped_grades.append(sum(mapped_grades))
             return mapped_grades

         df["essay_year"] = df["essay_year"].astype("int")
         df["mapped_year"] = df["essay_year"].apply(map_year)
         df["grades"] = df["grades"].apply(normalize_grades)
+        df = df.dropna(subset=["grades"])
+        df = df[
+            ~(df["id_prompt"] + "/" + df["id"]).isin(ESSAY_TO_IGNORE)
+        ]  # arbitrary removal of zero graded essays
+        df.to_csv(filepath, index=False)
+
+    def _split_generators(self, dl_manager):
+        urls = _URLS[self.config.name]
+        extracted_files = dl_manager.download_and_extract({self.config.name: urls})
+        html_parser = self._process_html_files(extracted_files)
+        if self.config.name == "sourceA":
+            self._post_process_dataframe(html_parser.sourceA)
+            self._generate_splits(html_parser.sourceA)
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={
+                        "filepath": os.path.join(
+                            extracted_files["sourceA"], "sourceA", "train.csv"
+                        ),
+                        "split": "train",
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    # These kwargs will be passed to _generate_examples
+                    gen_kwargs={
+                        "filepath": os.path.join(
+                            extracted_files["sourceA"], "sourceA", "validation.csv"
+                        ),
+                        "split": "validation",
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "filepath": os.path.join(
+                            extracted_files["sourceA"], "sourceA", "test.csv"
+                        ),
+                        "split": "test",
+                    },
+                ),
+            ]
+        elif self.config.name == "sourceB":
+            self._post_process_dataframe(html_parser.sourceB)
+            return [
+                datasets.SplitGenerator(
+                    name="full",
+                    gen_kwargs={
+                        "filepath": os.path.join(
+                            extracted_files["sourceB"], "sourceB", "sourceB.csv"
+                        ),
+                        "split": "full",
+                    },
+                ),
+            ]
+
+    def _process_html_files(self, paths_dict):
+        html_parser = HTMLParser(paths_dict)
+        html_parser.parse(self.config.name)
+        return html_parser
+
+    def _generate_splits(self, filepath: str, train_size=0.7):
+        df = pd.read_csv(filepath)
+        buckets = df.groupby("mapped_year")["id_prompt"].unique().to_dict()
+        df.drop("mapped_year", axis=1, inplace=True)
         train_set = []
         val_set = []
         test_set = []

 
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     def _generate_examples(self, filepath, split):
+        with open(filepath, encoding="utf-8") as csvfile:
+            next(csvfile)
+            csv_reader = csv.DictReader(csvfile, fieldnames=CSV_HEADER)
+            for i, row in enumerate(csv_reader):
+                grades = row["grades"].strip("[]").split(", ")
+                yield i, {
+                    "id": row["id"],
+                    "id_prompt": row["id_prompt"],
+                    "essay_title": row["title"],
+                    "essay_text": row["essay"],
+                    "grades": grades,
+                    "essay_year": row["essay_year"],
+                }
 
 
 class HTMLParser:

         soup = BeautifulSoup(conteudo, "html.parser")
         return soup
 
+    def _get_title(self, soup):
+        if self.sourceA:
+            title = soup.find("div", class_="container-composition")
+            if title is None:
+                title = soup.find("h1", class_="pg-color10").get_text()
+            else:
+                title = title.h2.get_text()
+            title = title.replace("\xa0", "")
+            return title.replace(";", ",")
+        elif self.sourceB:
+            title = soup.find("h1", class_="titulo-conteudo").get_text()
+            return title.strip("- Banco de redações").strip()
+
+    def _get_grades(self, soup):
+        if self.sourceA:
+            grades = soup.find("section", class_="results-table")
+            final_grades = []
+            if grades is not None:
+                grades = grades.find_all("span", class_="points")
+                assert len(grades) == 6, f"Missing grades: {len(grades)}"
+                for single_grade in grades:
+                    grade = int(single_grade.get_text())
+                    final_grades.append(grade)
+                assert final_grades[-1] == sum(
+                    final_grades[:-1]
+                ), "Grading sum is not making sense"
             else:
+                grades = soup.find("div", class_="redacoes-corrigidas pg-bordercolor7")
+                grades_sum = float(
+                    soup.find("th", class_="noBorder-left").get_text().replace(",", ".")
+                )
+                grades = grades.find_all("td")[:10]
+                for idx in range(1, 10, 2):
+                    grade = float(grades[idx].get_text().replace(",", "."))
+                    final_grades.append(grade)
+                assert grades_sum == sum(
+                    final_grades
+                ), "Grading sum is not making sense"
+                final_grades.append(grades_sum)
+            return final_grades
+        elif self.sourceB:
+            table = soup.find("table", {"id": "redacoes_corrigidas"})
+            grades = table.find_all("td", class_="simple-td")
+            grades = grades[3:]
+            result = []
+            for single_grade in grades:
+                result.append(int(single_grade.get_text()))
+            return result
+
+    def _get_general_comment(self, soup):
+        if self.sourceA:
+
+            def get_general_comment_aux(soup):
+                result = soup.find("article", class_="list-item c")
                 if result is not None:
+                    result = result.find("div", class_="description")
                     return result.get_text()
                 else:
+                    result = soup.find("p", style="margin: 0px 0px 11px;")
                     if result is not None:
                         return result.get_text()
                     else:
+                        result = soup.find("p", style="margin: 0px;")
                         if result is not None:
                             return result.get_text()
                         else:
+                            result = soup.find(
+                                "p", style="margin: 0px; text-align: justify;"
+                            )
+                            if result is not None:
+                                return result.get_text()
+                            else:
+                                return ""
+
+            text = soup.find("div", class_="text")
+            if text is not None:
+                text = text.find("p")
+                if (text is None) or (len(text.get_text()) < 2):
+                    return get_general_comment_aux(soup)
+                return text.get_text()
+            else:
                 return get_general_comment_aux(soup)
+        elif self.sourceB:
+            return ""
 
+    def _get_specific_comment(self, soup, general_comment):
+        if self.sourceA:
+            result = soup.find("div", class_="text")
             cms = []
+            if result is not None:
+                result = result.find_all("li")
+                if result != []:
+                    for item in result:
+                        text = item.get_text()
+                        if text != "\xa0":
+                            cms.append(text)
+                else:
+                    result = soup.find("div", class_="text").find_all("p")
+                    for item in result:
+                        text = item.get_text()
+                        if text != "\xa0":
+                            cms.append(text)
             else:
+                result = soup.find_all("article", class_="list-item c")
+                if len(result) < 2:
+                    return ["First if"]
+                result = result[1].find_all("p")
                 for item in result:
                     text = item.get_text()
                     if text != "\xa0":
                         cms.append(text)
+            specific_comment = cms.copy()
+            if general_comment in specific_comment:
+                specific_comment.remove(general_comment)
+            if (len(specific_comment) > 1) and (len(specific_comment[0]) < 2):
+                specific_comment = specific_comment[1:]
+            return self._clean_list(specific_comment)
+        elif self.sourceB:
+            return ""
+
+    def _get_essay(self, soup):
+        if self.sourceA:
+            essay = soup.find("div", class_="text-composition")
             result = []
+            if essay is not None:
+                essay = essay.find_all("p")
+                for f in essay:
+                    while f.find("span", style="color:#00b050") is not None:
+                        f.find("span", style="color:#00b050").decompose()
+                    while f.find("span", class_="certo") is not None:
+                        f.find("span", class_="certo").decompose()
+                for paragraph in essay:
+                    result.append(paragraph.get_text())
+            else:
+                essay = soup.find("div", {"id": "texto"})
+                essay.find("section", class_="list-items").decompose()
+                essay = essay.find_all("p")
+                for f in essay:
+                    while f.find("span", class_="certo") is not None:
+                        f.find("span", class_="certo").decompose()
+                for paragraph in essay:
+                    result.append(paragraph.get_text())
+            return " ".join(self._clean_list(result))
+        elif self.sourceB:
+            table = soup.find("article", class_="texto-conteudo entire")
+            table = soup.find("div", class_="area-redacao-corrigida")
+            if table is None:
+                result = None
+            else:
+                for span in soup.find_all("span"):
+                    span.decompose()
+                result = table.find_all("p")
+                result = " ".join(
+                    [paragraph.get_text().strip() for paragraph in result]
+                )
             return result
 
+    def _get_essay_year(self, soup):
+        if self.sourceA:
+            pattern = r"redações corrigidas - \w+/\d+"
+            first_occurrence = re.search(pattern, soup.get_text().lower())
+            matched_url = first_occurrence.group(0) if first_occurrence else None
+            year_pattern = r"\d{4}"
+            return re.search(year_pattern, matched_url).group(0)
+        elif self.sourceB:
+            pattern = r"Enviou seu texto em.*?(\d{4})"
+            match = re.search(pattern, soup.get_text())
+            return match.group(1) if match else -1
 
     def _clean_title(self, title):
+        if self.sourceA:
+            smaller_index = title.find("[")
+            if smaller_index == -1:
+                return title
+            else:
+                bigger_index = title.find("]")
+                new_title = title[:smaller_index] + title[bigger_index + 1 :]
+                return self._clean_title(new_title.replace(" ", " "))
+        elif self.sourceB:
             return title
 
     def _clean_list(self, list):
         if list == []:

                     new_list.append(phrase)
             return new_list
 
+    def parse(self, config_name):
         for key, filepath in self.paths_dict.items():
+            if key != config_name:
+                continue  # TODO improve later, we will only support a single config at a time
             full_path = os.path.join(filepath, key)
+            if config_name == "sourceA":
                 self.sourceA = f"{full_path}/sourceA.csv"
+            elif config_name == "sourceB":
+                self.sourceB = f"{full_path}/sourceB.csv"
             with open(
                 f"{full_path}/{key}.csv", "w", newline="", encoding="utf8"
             ) as final_file:

                         continue
                 prompt = os.path.join(full_path, prompt_folder)
                 prompt_essays = [name for name in os.listdir(prompt)]
+                essay_year = self._get_essay_year(
                     self.apply_soup(prompt, "Prompt.html")
                 )
                 for essay in prompt_essays:
                     soup_text = self.apply_soup(prompt, essay)
                     if essay == "Prompt.html":
                         continue
+                    essay_title = self._clean_title(self._get_title(soup_text))
+                    essay_grades = self._get_grades(soup_text)
+                    essay_text = self._get_essay(soup_text)
+                    general_comment = self._get_general_comment(soup_text).strip()
+                    specific_comment = self._get_specific_comment(
+                        soup_text, general_comment
                     )
                     writer.writerow(
                         [
                             essay,