Rricha committed
Commit 7ef3f12
1 Parent(s): 48f5607

Update climate-evaluation.py

Files changed (1)
  climate-evaluation.py  +19 -31
climate-evaluation.py CHANGED
@@ -89,7 +89,7 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
             text_features={"text": "text"},
             label_classes=[0, 1, 2],
             label_column="label",
-            data_dir="ClimateStance",
+            data_dir="climate-evaluation/ClimateStance",
             citation=textwrap.dedent(
                 """\
                 @inproceedings{vaid-etal-2022-towards,
@@ -120,7 +120,7 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
             text_features={"text": "text"},
             label_classes=["0", "1", "2", "3", "4"],
             label_column="label",
-            data_dir="ClimateEng",
+            data_dir="climate-evaluation/ClimateEng",
             citation=textwrap.dedent(
                 """\
                 @inproceedings{vaid-etal-2022-towards,
@@ -151,7 +151,7 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
             text_features={"text": "sentence"},
             label_classes=["0", "1"],
             label_column="label",
-            data_dir="ClimaText",
+            data_dir="climate-evaluation/ClimaText",
             citation=textwrap.dedent(
                 """\
                 @misc{varini2021climatext,
@@ -172,7 +172,7 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                 """\
                 CDP-QA is a dataset compiled from the questionnaires of the Carbon Disclosure Project, where cities, corporations, and states disclose their environmental information. The dataset presents pairs of questions and answers, and the objective is to predict whether a given answer is valid for the corresponding question. We benchmarked ClimateGPT on the questionnaires from the Combined split. """
             ),
-            data_dir="CDP",
+            data_dir="climate-evaluation/CDP",
             text_features={"question": "question", "answer": "answer"},
             label_classes=["0", "1"],
             label_column="label",
@@ -196,7 +196,7 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                 """\
                 The Exeter Climate Claims dataset contains textual data from 33 influential climate contrarian blogs and the climate-change-related content from 20 conservative think tanks spanning the years 1998 to 2020. Annotation of the dataset was done manually using a thorough three-layer taxonomy of (climate-change related) contrarian claims, which was developed by the authors. We utilize this dataset specifically for the binary classification task of discerning whether a given text contains a contrarian claim pertaining to climate change or not. """
             ),
-            data_dir="exeter",
+            data_dir="climate-evaluation/exeter",
             text_features={"text": "text"},
             label_classes=["0", "1"],
             label_column="label",
@@ -223,7 +223,7 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                 EXAMS is a multiple choice question answering collected from high school examinations. To evaluate ClimateGPT on the cascaded machine translation approach, we evaluate on the English translation of the Arabic subset of this dataset. The Arabic subset covers questions from biology, physics, science, social science and Islamic studies.
                 """
             ),
-            data_dir="exams/translated",
+            data_dir="climate-evaluation/exams/translated",
             text_features={"subject": "subject", "question_stem": "question_stem", "choices": "choices"},
             label_classes=["A", "B", "C", "D"],
             label_column="answerKey",
@@ -261,7 +261,7 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                 EXAMS is a multiple choice question answering collected from high school examinations. To evaluate ClimateGPT on the cascaded machine translation approach, we evaluate on the Arabic subset of this dataset. The Arabic subset covers questions from biology, physics, science, social science and Islamic studies. Note, this dataset is in arabic.
                 """
             ),
-            data_dir="exams/",
+            data_dir="climate-evaluation/exams/",
             text_features={"subject": "subject", "question_stem": "question_stem", "choices": "choices"},
             label_classes=["A", "B", "C", "D"],
             label_column="answerKey",
@@ -300,16 +300,13 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                 {
                     "subject": datasets.Value("string"),
                     "question_stem": datasets.Value("string"),
-                    "choices":
-                    {
-                        "text": datasets.features.Sequence(datasets.Value("string")),
-                        "label": datasets.features.Sequence(datasets.Value("string")),
-                    },
+                    "choices": datasets.Value("string"),
                     "answerKey": datasets.ClassLabel(
                         names=["A", "B", "C", "D"]
                     ),
                 }
             )
+            features["idx"] = datasets.Value("int32")
         else:
             if self.config.name == "cdp_qa":
                 features = {
@@ -338,6 +335,7 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         data_dir = self.config.data_dir
+        print(f"self.config.data_dir: {self.config.data_dir}")
 
         if self.config.name == "exams" or self.config.name == "translated_exams":
             return [
@@ -375,23 +373,11 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
             ),
         ]
 
-        if self.config.name == "climate_fever":
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TEST,
-                    gen_kwargs={
-                        "data_file": os.path.join(
-                            data_dir or "", "climate-fever-dataset-r1.jsonl"
-                        ),
-                        "split": "test",
-                    },
-                ),
-            ]
-
         if self.config.name == "climatext":
             files = {
                 "train": [
-                    "train-data/Wiki-Doc-Train.tsv",
+                    # "train-data/AL-10Ks.tsv : 3000 (58 positives, 2942 negatives) (TSV, 127138 KB).tsv",
+                    "train-data/AL-Wiki (train).tsv",
                 ],
                 "valid": ["dev-data/Wikipedia (dev).tsv"],
                 "test": [
@@ -524,12 +510,14 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                 example["category"] = category
 
                 if self.config.label_column in row:
+                    # print(f"self.config.label_column: {self.config.label_column}")
                     label = row[self.config.label_column]
-                    # For some tasks, the label is represented as 0 and 1 in the tsv
-                    # files and needs to be cast to integer to work with the feature.
-                    if label_classes and label not in label_classes:
-                        label = int(label) if label else None
-                    example["label"] = process_label(label)
+                    if self.config.name in ["exams", "translated_exams"]:
+                        example["answerKey"] = process_label(label)
+                    else:
+                        if label_classes and label not in label_classes:
+                            label = int(label) if label else None
+                        example["label"] = process_label(label)
                 else:
                     example["label"] = process_label(-1)
 
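A quick way to sanity-check the relocated data_dir values is to load one of the affected configs through the updated script. A minimal sketch, assuming the script is run from a checkout where the climate-evaluation/ data folder sits beside it; the config name "climate_stance" and the available splits are assumptions, not guaranteed by this diff:

import datasets

# Point load_dataset at the local loading script; with the data files
# checked out under climate-evaluation/, the new data_dir values set in
# this commit should resolve without any extra path juggling.
ds = datasets.load_dataset("climate-evaluation.py", "climate_stance")
print(ds)  # inspect which splits actually materialize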
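Note that the EXAMS feature change flattens "choices" from a nested {"text", "label"} feature into a single string, so downstream code now has to deserialize it itself. A minimal sketch, assuming the raw column holds a Python-literal dict (the exact serialization format is an assumption; inspect the source files to confirm):

import ast

def parse_choices(raw: str) -> dict:
    # Assumed format: "{'text': ['...', '...'], 'label': ['A', 'B', 'C', 'D']}".
    # ast.literal_eval parses Python literals without executing arbitrary code.
    return ast.literal_eval(raw)

# Hypothetical usage, reusing `ds` from the loading sketch above:
choices = parse_choices(ds["test"][0]["choices"])
print(choices["text"], choices["label"])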