bzantium committed
Commit 1643ce6
1 Parent(s): fec618b

initial commit

metrics/edit_sim.py ADDED
@@ -0,0 +1,36 @@
+ from functools import lru_cache
+
+
+ def lev_dist(prediction, ground_truth):
+     @lru_cache(None)  # for memoization
+     def min_dist(s1, s2):
+         if s1 == len(prediction) or s2 == len(ground_truth):
+             return len(prediction) - s1 + len(ground_truth) - s2
+         # no change required
+         if prediction[s1] == ground_truth[s2]:
+             return min_dist(s1 + 1, s2 + 1)
+         return 1 + min(
+             min_dist(s1, s2 + 1),  # insert character
+             min_dist(s1 + 1, s2),  # delete character
+             min_dist(s1 + 1, s2 + 1),  # replace character
+         )
+     return min_dist(0, 0)
+
+
+ def edit_sim_score(a, b):
+     return 1 - lev_dist(a, b) / max(len(a), len(b))
+
+
+ def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
+     scores_for_ground_truths = []
+     for ground_truth in ground_truths:
+         score = metric_fn(prediction, ground_truth)
+         scores_for_ground_truths.append(score)
+     return max(scores_for_ground_truths)
+
+
+ def compute_edit_sim(predictions, references):
+     edit_sim = 0
+     for prediction, ground_truths in zip(predictions, references):
+         edit_sim += metric_max_over_ground_truths(edit_sim_score, prediction, ground_truths)
+     return 100.0 * edit_sim / len(predictions)
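
A quick usage sketch for this metric (illustrative only, not part of the commit; the example strings and import path are assumptions):

    from metrics.edit_sim import compute_edit_sim  # assumes the repo root is on PYTHONPATH

    # "kitten" -> "sitting" needs 3 edits and the longer string has 7 characters,
    # so edit_sim_score = 1 - 3/7 ≈ 0.5714 and compute_edit_sim scales it to ≈ 57.14.
    print(compute_edit_sim(["kitten"], [["sitting"]]))  # ≈ 57.1429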
metrics/exact_match.py ADDED
@@ -0,0 +1,40 @@
+ import re
+ import string
+
+
+ def normalize_answer(s):
+     """Lower text and remove punctuation, articles and extra whitespace."""
+
+     def remove_articles(text):
+         return re.sub(r"\b(a|an|the)\b", " ", text)
+
+     def white_space_fix(text):
+         return " ".join(text.split())
+
+     def remove_punc(text):
+         exclude = set(string.punctuation)
+         return "".join(ch for ch in text if ch not in exclude)
+
+     def lower(text):
+         return text.lower()
+
+     return white_space_fix(remove_articles(remove_punc(lower(s))))
+
+
+ def exact_match_score(prediction, ground_truth):
+     return normalize_answer(prediction) == normalize_answer(ground_truth)
+
+
+ def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
+     scores_for_ground_truths = []
+     for ground_truth in ground_truths:
+         score = metric_fn(prediction, ground_truth)
+         scores_for_ground_truths.append(score)
+     return max(scores_for_ground_truths)
+
+
+ def compute_exact_match(predictions, references):
+     exact_match = 0
+     for prediction, ground_truths in zip(predictions, references):
+         exact_match += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
+     return 100.0 * exact_match / len(predictions)
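
For reference, normalize_answer lowercases, strips punctuation and the articles a/an/the, and collapses whitespace before comparison, and compute_exact_match returns the percentage of predictions that match at least one of their references. A usage sketch (example strings and import path are assumptions, not part of the commit):

    from metrics.exact_match import compute_exact_match  # assumes the repo root is on PYTHONPATH

    # "The Cat!" normalizes to "cat", which matches the first reference exactly.
    print(compute_exact_match(["The Cat!"], [["cat", "a dog"]]))  # 100.0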
metrics/f1.py ADDED
@@ -0,0 +1,52 @@
+ # Copied from https://github.com/huggingface/datasets/blob/d3c7b9481d427ce41256edaf6773c47570f06f3b/metrics/squad/evaluate.py
+
+ import re
+ import string
+ from collections import Counter
+
+
+ def normalize_answer(s):
+     """Lower text and remove punctuation, articles and extra whitespace."""
+
+     def remove_articles(text):
+         return re.sub(r"\b(a|an|the)\b", " ", text)
+
+     def white_space_fix(text):
+         return " ".join(text.split())
+
+     def remove_punc(text):
+         exclude = set(string.punctuation)
+         return "".join(ch for ch in text if ch not in exclude)
+
+     def lower(text):
+         return text.lower()
+
+     return white_space_fix(remove_articles(remove_punc(lower(s))))
+
+
+ def f1_score(prediction, ground_truth):
+     prediction_tokens = normalize_answer(prediction).split()
+     ground_truth_tokens = normalize_answer(ground_truth).split()
+     common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
+     num_same = sum(common.values())
+     if num_same == 0:
+         return 0
+     precision = 1.0 * num_same / len(prediction_tokens)
+     recall = 1.0 * num_same / len(ground_truth_tokens)
+     f1 = (2 * precision * recall) / (precision + recall)
+     return f1
+
+
+ def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
+     scores_for_ground_truths = []
+     for ground_truth in ground_truths:
+         score = metric_fn(prediction, ground_truth)
+         scores_for_ground_truths.append(score)
+     return max(scores_for_ground_truths)
+
+
+ def compute_f1(predictions, references):
+     f1 = 0
+     for prediction, ground_truths in zip(predictions, references):
+         f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
+     return 100.0 * f1 / len(predictions)
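
compute_f1 applies the same normalization as exact match, then scores token overlap against the best-matching reference. A worked sketch (example strings and import path are assumptions, not part of the commit):

    from metrics.f1 import compute_f1  # assumes the repo root is on PYTHONPATH

    # Normalized prediction tokens: ["cat", "sat"]; reference tokens: ["cat", "sat", "on", "mat"].
    # Precision = 2/2, recall = 2/4, so F1 = 2 * 1.0 * 0.5 / 1.5 ≈ 0.6667, scaled to ≈ 66.67.
    print(compute_f1(["a cat sat"], [["the cat sat on the mat"]]))  # ≈ 66.6667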
metrics/longbench.py ADDED
@@ -0,0 +1,449 @@
+ """ longbench benchmark metric. """
+
+ from collections import defaultdict
+ from copy import deepcopy
+ import datasets
+
+ # fmt: off
+ from .rouge import compute_rouge, postprocess_text as rouge_postprocess_text  # From: https://huggingface.co/datasets/tau/longbench/raw/main/metrics/rouge.py
+ from .exact_match import compute_exact_match  # From: https://huggingface.co/datasets/tau/longbench/raw/main/metrics/exact_match.py
+ from .f1 import compute_f1  # From: https://huggingface.co/datasets/tau/longbench/raw/main/metrics/f1.py
+ from .edit_sim import compute_edit_sim
+ # fmt: on
+
+ _CITATION = """\
+ @misc{bai2023longbench,
+       title={LongBench: A Bilingual, Multitask Benchmark for Long Context Understanding},
+       author={Yushi Bai and Xin Lv and Jiajie Zhang and Hongchang Lyu and Jiankai Tang and Zhidian Huang and Zhengxiao Du and Xiao Liu and Aohan Zeng and Lei Hou and Yuxiao Dong and Jie Tang and Juanzi Li},
+       year={2023},
+       eprint={2308.14508},
+       archivePrefix={arXiv},
+       primaryClass={cs.CL}
+ }
+ """
+
+ _DESCRIPTION = """\
+ LongBench is a bilingual, multitask benchmark for long context understanding, designed to measure how well pre-trained language models handle long text. Its datasets cover key long-text application scenarios such as single-document QA, multi-document QA, summarization, few-shot learning, synthetic tasks, and code completion.
+ """
+
+ _KWARGS_DESCRIPTION = """
+ Compute the LongBench evaluation metric associated with each LongBench dataset.
+ Args:
+     predictions: list of predictions to score.
+         Each prediction should be a string.
+     references: list of lists of references for each example.
+         Each reference should be a string.
+ Returns: depending on the LongBench subset, one or several of:
+     "exact_match": Exact Match score
+     "f1": F1 score
+     "rouge": ROUGE score
+     "edit_sim": Edit Similarity score
+
+ Use the following code to download the metric:
+ ```
+ import os, shutil
+ from huggingface_hub import hf_hub_download
+ def download_metric():
+     longbench_metric_path = hf_hub_download(repo_id="tau/longbench", repo_type="dataset", filename="metrics/longbench.py")
+     updated_longbench_metric_path = os.path.join(
+         os.path.dirname(longbench_metric_path), os.path.basename(longbench_metric_path).replace(".", "_") + ".py"
+     )
+     shutil.copy(longbench_metric_path, updated_longbench_metric_path)
+     return updated_longbench_metric_path
+
+ longbench_metric_path = download_metric()
+ ```
+
+ Examples:
+     predictions = ["exact match example", "hello there", "general kenobi"]  # List[str]
+     references = [["exact match example"], ["hello", "hi there"], ["commander kenobi"]]  # List[List[str]]
+
+     >>> longbench_metric = datasets.load_metric(longbench_metric_path, 'gov_report')  # 'gov_report' or any other ROUGE-based subset such as "qmsum" or "multi_news"
+     >>> results = longbench_metric.compute(predictions=predictions, references=references)
+     >>> print(results)
+     {'rouge/rouge1': 72.2222, 'rouge/rouge2': 33.3333, 'rouge/rougeL': 72.2222, 'rouge/rougeLsum': 72.2222, 'rouge/geometric_mean': 55.8136,
+      'num_predicted': 3, 'mean_prediction_length_characters': 14.6667, 'longbench_score': 55.8136,
+      'display_keys': ['rouge/rouge1', 'rouge/rouge2', 'rouge/rougeL'], 'display': [72.2222, 33.3333, 72.2222]}
+
+     >>> longbench_metric = datasets.load_metric(longbench_metric_path, 'trec')  # 'trec' or any other exact-match subset such as "lsht" or "passage_count"
+     >>> results = longbench_metric.compute(predictions=predictions, references=references)
+     >>> print(results)
+     {'exact_match': 33.3333, 'num_predicted': 3, 'mean_prediction_length_characters': 14.6667, 'longbench_score': 33.3333,
+      'display_keys': ['exact_match'], 'display': [33.3333]}
+
+     >>> longbench_metric = datasets.load_metric(longbench_metric_path, 'narrative_qa')  # 'narrative_qa' or any other F1-based subset such as "qasper"
+     >>> results = longbench_metric.compute(predictions=predictions, references=references)
+     >>> print(results)
+     {'f1': 72.2222, 'num_predicted': 3, 'mean_prediction_length_characters': 14.6667, 'longbench_score': 72.2222,
+      'display_keys': ['f1'], 'display': [72.2222]}
+ """
+
+ DATASET_TO_METRICS = {
+     "narrative_qa": {
+         "metrics_to_compute": ["f1"],
+         "longbench_score_key": "f1",
+         "display_keys": ["f1"],
+     },
+     "qasper": {
+         "metrics_to_compute": ["f1"],
+         "longbench_score_key": "f1",
+         "display_keys": ["f1"],
+     },
+     "multifieldqa_en": {
+         "metrics_to_compute": ["f1"],
+         "longbench_score_key": "f1",
+         "display_keys": ["f1"],
+     },
+     "multifieldqa_zh": {
+         "metrics_to_compute": ["f1"],
+         "longbench_score_key": "f1",
+         "display_keys": ["f1"],
+     },
+     "hotpotqa": {
+         "metrics_to_compute": ["f1"],
+         "longbench_score_key": "f1",
+         "display_keys": ["f1"],
+     },
+     "2wikimqa": {
+         "metrics_to_compute": ["f1"],
+         "longbench_score_key": "f1",
+         "display_keys": ["f1"],
+     },
+     "musique": {
+         "metrics_to_compute": ["f1"],
+         "longbench_score_key": "f1",
+         "display_keys": ["f1"],
+     },
+     "dureader": {
+         "metrics_to_compute": ["rouge"],
+         "longbench_score_key": "rouge/geometric_mean",
+         "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
+     },
+     "gov_report": {
+         "metrics_to_compute": ["rouge"],
+         "longbench_score_key": "rouge/geometric_mean",
+         "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
+     },
+     "qmsum": {
+         "metrics_to_compute": ["rouge"],
+         "longbench_score_key": "rouge/geometric_mean",
+         "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
+     },
+     "multi_news": {
+         "metrics_to_compute": ["rouge"],
+         "longbench_score_key": "rouge/geometric_mean",
+         "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
+     },
+     "vcsum": {
+         "metrics_to_compute": ["rouge"],
+         "longbench_score_key": "rouge/geometric_mean",
+         "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
+     },
+     "trec": {
+         "metrics_to_compute": ["exact_match"],
+         "longbench_score_key": "exact_match",
+         "display_keys": ["exact_match"],
+     },
+     "triviaqa": {
+         "metrics_to_compute": ["f1"],
+         "longbench_score_key": "f1",
+         "display_keys": ["f1"],
+     },
+     "samsum": {
+         "metrics_to_compute": ["rouge"],
+         "longbench_score_key": "rouge/geometric_mean",
+         "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
+     },
+     "lsht": {
+         "metrics_to_compute": ["exact_match"],
+         "longbench_score_key": "exact_match",
+         "display_keys": ["exact_match"],
+     },
+     "passage_count": {
+         "metrics_to_compute": ["exact_match"],
+         "longbench_score_key": "exact_match",
+         "display_keys": ["exact_match"],
+     },
+     "passage_retrieval_en": {
+         "metrics_to_compute": ["exact_match"],
+         "longbench_score_key": "exact_match",
+         "display_keys": ["exact_match"],
+     },
+     "passage_retrieval_zh": {
+         "metrics_to_compute": ["exact_match"],
+         "longbench_score_key": "exact_match",
+         "display_keys": ["exact_match"],
+     },
+     "lcc": {
+         "metrics_to_compute": ["edit_sim"],
+         "longbench_score_key": "edit_sim",
+         "display_keys": ["edit_sim"],
+     },
+     "repobench-p": {
+         "metrics_to_compute": ["edit_sim"],
+         "longbench_score_key": "edit_sim",
+         "display_keys": ["edit_sim"],
+     },
+     "qasper_e": {
+         "metrics_to_compute": ["f1"],
+         "longbench_score_key": "f1",
+         "display_keys": ["f1"],
+     },
+     "multifieldqa_en_e": {
+         "metrics_to_compute": ["f1"],
+         "longbench_score_key": "f1",
+         "display_keys": ["f1"],
+     },
+     "hotpotqa_e": {
+         "metrics_to_compute": ["f1"],
+         "longbench_score_key": "f1",
+         "display_keys": ["f1"],
+     },
+     "2wikimqa_e": {
+         "metrics_to_compute": ["f1"],
+         "longbench_score_key": "f1",
+         "display_keys": ["f1"],
+     },
+     "gov_report_e": {
+         "metrics_to_compute": ["rouge"],
+         "longbench_score_key": "rouge/geometric_mean",
+         "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
+     },
+     "multi_news_e": {
+         "metrics_to_compute": ["rouge"],
+         "longbench_score_key": "rouge/geometric_mean",
+         "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
+     },
+     "trec_e": {
+         "metrics_to_compute": ["exact_match"],
+         "longbench_score_key": "exact_match",
+         "display_keys": ["exact_match"],
+     },
+     "triviaqa_e": {
+         "metrics_to_compute": ["f1"],
+         "longbench_score_key": "f1",
+         "display_keys": ["f1"],
+     },
+     "samsum_e": {
+         "metrics_to_compute": ["rouge"],
+         "longbench_score_key": "rouge/geometric_mean",
+         "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
+     },
+     "passage_count_e": {
+         "metrics_to_compute": ["exact_match"],
+         "longbench_score_key": "exact_match",
+         "display_keys": ["exact_match"],
+     },
+     "passage_retrieval_en_e": {
+         "metrics_to_compute": ["exact_match"],
+         "longbench_score_key": "exact_match",
+         "display_keys": ["exact_match"],
+     },
+     "lcc_e": {
+         "metrics_to_compute": ["edit_sim"],
+         "longbench_score_key": "edit_sim",
+         "display_keys": ["edit_sim"],
+     },
+     "repobench-p_e": {
+         "metrics_to_compute": ["edit_sim"],
+         "longbench_score_key": "edit_sim",
+         "display_keys": ["edit_sim"],
+     },
+ }
+
+
+ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+ class longbench(datasets.Metric):
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+
+         self._compute_helper_kwargs_fn = {
+             "rouge": lambda: {
+                 "metric_fn": compute_rouge,
+                 "agg_fn": max,
+                 "metric_fn_kwargs": {"use_stemmer": False},
+                 "metric_returns_per_example": True,
+                 "transform_single_input_fn": lambda text: rouge_postprocess_text(text),
+                 "transform_result_fn": lambda output: {
+                     key: (value[0] if isinstance(value, list) else value).fmeasure * 100
+                     for key, value in output.items()
+                 },
+                 "transform_aggregated_result_fn": lambda output: output.update(
+                     {"geometric_mean": (output["rouge1"] * output["rouge2"] * output["rougeL"]) ** (1.0 / 3.0)}
+                 )
+                 or output,
+             },
+             "exact_match": lambda: {
+                 "metric_fn": compute_exact_match,
+                 "agg_fn": None,  # compute_exact_match already takes max
+                 "transform_result_fn": lambda output: {None: output},
+             },
+             "f1": lambda: {
+                 "metric_fn": compute_f1,
+                 "agg_fn": None,  # compute_f1 already takes max
+                 "transform_result_fn": lambda output: {None: output},
+             },
+             "edit_sim": lambda: {
+                 "metric_fn": compute_edit_sim,
+                 "agg_fn": None,  # compute_edit_sim already takes max
+                 "transform_result_fn": lambda output: {None: output},
+             },
+         }
+
+         custom_metrics = (
+             [metric for metric in self.config_name.split(",") if len(metric) > 0]
+             if self.config_name.startswith(",")
+             else None
+         )
+         if custom_metrics is not None:
+             for metric in custom_metrics:
+                 if metric not in self._compute_helper_kwargs_fn:
+                     raise KeyError(
+                         f"You should supply a metric name selected in {list(self._compute_helper_kwargs_fn.keys())}"
+                     )
+             self._metrics_to_compute = custom_metrics
+         else:
+             if self.config_name not in DATASET_TO_METRICS:
+                 raise KeyError(f"You should supply a configuration name selected in {list(DATASET_TO_METRICS.keys())}")
+             self._metrics_to_compute = DATASET_TO_METRICS[self.config_name]["metrics_to_compute"]
+
+     def _info(self):
+         return datasets.MetricInfo(
+             description=_DESCRIPTION,
+             citation=_CITATION,
+             inputs_description=_KWARGS_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "predictions": datasets.Value("string"),
+                     "references": datasets.Sequence(datasets.Value("string")),
+                 }
+             ),
+             codebase_urls=[],
+             reference_urls=[],
+         )
+
+     def convert_from_map_format(self, id_to_pred, id_to_labels):
+         index_to_id = list(id_to_pred.keys())
+         predictions = [id_to_pred[id_] for id_ in index_to_id]
+         references = [id_to_labels[id_] for id_ in index_to_id]
+         return {"predictions": predictions, "references": references}
+
+     def _compute(self, predictions, references):
+         metrics = {}
+         for metric in self._metrics_to_compute:
+             result = _compute_helper(
+                 deepcopy(predictions),
+                 deepcopy(references),
+                 **self._compute_helper_kwargs_fn[metric](),
+             )
+             metrics.update(
+                 {(f"{metric}/{key}" if key is not None else metric): value for key, value in result.items()}
+             )
+         metrics["num_predicted"] = len(predictions)
+         prediction_lengths = [len(prediction) for prediction in predictions]
+         metrics["mean_prediction_length_characters"] = sum(prediction_lengths) / len(prediction_lengths)
+
+         metrics = {key: round(value, 4) for key, value in metrics.items()}
+
+         if self.config_name in DATASET_TO_METRICS:
+             longbench_score_key = DATASET_TO_METRICS[self.config_name]["longbench_score_key"]
+             if longbench_score_key is not None:
+                 metrics["longbench_score"] = metrics[longbench_score_key]
+             else:
+                 metrics["longbench_score"] = None
+
+             display_keys = DATASET_TO_METRICS[self.config_name]["display_keys"]
+             metrics["display_keys"] = display_keys
+             metrics["display"] = []
+             for display_key in display_keys:
+                 metrics["display"].append(metrics[display_key])
+
+         return metrics
+
+
+ def _compute_helper(
+     predictions,
+     references,
+     metric_fn,
+     agg_fn,
+     metric_fn_kwargs=None,
+     transform_single_input_fn=None,
+     transform_result_fn=None,
+     transform_aggregated_result_fn=None,
+     metric_returns_per_example=False,
+ ):
+     if metric_fn_kwargs is None:
+         metric_fn_kwargs = {}
+
+     if agg_fn is None:
+         assert metric_returns_per_example is False
+
+     if transform_single_input_fn is not None:
+         predictions = [transform_single_input_fn(prediction) for prediction in predictions]
+         references = [
+             [transform_single_input_fn(reference) for reference in reference_list] for reference_list in references
+         ]
+
+     if transform_result_fn is None:
+         transform_result_fn = lambda x: x
+         do_transform_result = False
+     else:
+         do_transform_result = True
+
+     if transform_aggregated_result_fn is None:
+         transform_aggregated_result_fn = lambda x: x
+
+     if agg_fn is not None:
+         # Required when the metric doesn't do the aggregation we need
+         scores = defaultdict(list)
+         if metric_returns_per_example is False:
+             # If, when given a list of predictions and references, the metric returns an aggregated score,
+             # we need to compute the metric for each prediction and reference and then aggregate the results.
+             # This is only an issue when we want to get the best aggregated score (e.g. max) for a prediction
+             # with multiple references.
+             for prediction, reference_list in zip(predictions, references):
+                 prediction_scores = defaultdict(list)
+                 for reference in reference_list:
+                     result = transform_result_fn(metric_fn([prediction], [reference], **metric_fn_kwargs))
+                     for key in result:
+                         prediction_scores[key].append(result[key])
+                 for key in prediction_scores:
+                     scores[key].append(agg_fn(prediction_scores[key]))
+         else:
+             # Flatten the references and then aggregate per prediction with agg_fn
+             mapping = [[] for _ in range(len(predictions))]
+             flattened_predictions = []
+             flattened_references = []
+             for i, prediction in enumerate(predictions):
+                 for reference in references[i]:
+                     flattened_predictions.append(prediction)
+                     flattened_references.append(reference)
+                     mapping[i].append(len(flattened_references) - 1)
+
+             results = metric_fn(flattened_predictions, flattened_references, **metric_fn_kwargs)
+             if isinstance(results, dict):
+                 # Convert a dictionary with lists per key to a list with dictionary with the same keys per element
+                 results_list = [{k: None for k in results} for _ in range(len(flattened_predictions))]
+                 for k, v in results.items():
+                     for i in range(len(v)):
+                         results_list[i][k] = v[i]
+             else:
+                 results_list = results
+
+             if do_transform_result:
+                 for i in range(len(results_list)):
+                     results_list[i] = transform_result_fn(results_list[i])
+
+             for reference_indexes in mapping:
+                 prediction_scores = defaultdict(list)
+                 for reference_index in reference_indexes:
+                     result = results_list[reference_index]
+                     for key in result:
+                         prediction_scores[key].append(result[key])
+                 for key in prediction_scores:
+                     scores[key].append(agg_fn(prediction_scores[key]))
+
+         return transform_aggregated_result_fn({key: sum(value) / len(value) for key, value in scores.items()})
+     else:
+         return transform_aggregated_result_fn(
+             transform_result_fn(metric_fn(predictions, references, **metric_fn_kwargs))
+         )
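
Once the script is available locally, it can be loaded like any other metric script; the config name selects the per-dataset metric from DATASET_TO_METRICS. A minimal sketch (the local path is hypothetical, and it assumes a datasets version that still ships the legacy load_metric API this class targets):

    import datasets

    longbench_metric = datasets.load_metric("metrics/longbench.py", "narrative_qa")  # any key of DATASET_TO_METRICS
    results = longbench_metric.compute(
        predictions=["general kenobi"],
        references=[["commander kenobi"]],
    )
    print(results["longbench_score"], results["display_keys"])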
metrics/rouge.py ADDED
@@ -0,0 +1,28 @@
+ # Copied from https://github.com/huggingface/datasets/blob/d3c7b9481d427ce41256edaf6773c47570f06f3b/metrics/rouge/rouge.py
+ # Added multiprocessing
+
+ import multiprocessing
+ import nltk
+ from rouge_score import rouge_scorer
+ from multiprocessing import Pool
+
+
+ def compute_rouge(predictions, references, rouge_types=None, use_stemmer=False):
+     if rouge_types is None:
+         rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
+
+     scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
+     with Pool() as p:
+         scores = p.starmap(scorer.score, zip(references, predictions))
+
+     result = {}
+     for key in scores[0]:
+         result[key] = list(score[key] for score in scores)
+
+     return result
+
+
+ # Copied from https://github.com/huggingface/transformers/blob/3977b58437b8ce1ea1da6e31747d888efec2419b/examples/pytorch/summarization/run_summarization.py#L520
+ def postprocess_text(text):
+     # rougeLSum expects newline after each sentence
+     return "\n".join(nltk.sent_tokenize(text))
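
compute_rouge returns, for each ROUGE type, a list of per-example rouge_score Score objects, and postprocess_text relies on NLTK's punkt sentence tokenizer. A usage sketch (example strings and import path are assumptions; assumes nltk and rouge_score are installed, and runs under a main guard because compute_rouge opens a multiprocessing Pool):

    import nltk
    from metrics.rouge import compute_rouge, postprocess_text  # assumes the repo root is on PYTHONPATH

    if __name__ == "__main__":
        nltk.download("punkt")  # sentence tokenizer used by postprocess_text
        preds = [postprocess_text("The cat sat on the mat. It was happy.")]
        refs = [postprocess_text("A cat sat on the mat. It seemed happy.")]
        scores = compute_rouge(preds, refs)
        print(scores["rouge1"][0].fmeasure)  # one Score per prediction/reference pair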