eduagarcia committed
Commit: 9662283
Parent: 59ba827

fix f1_score calculation bug

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. 152334H/miqu-1-70b-sf/raw_2024-05-23T11-20-45.843993/results.json +1207 -1207
  2. 152334H/miqu-1-70b-sf/results_2024-05-23T11-20-45.843993.json +6 -6
  3. BAAI/Infinity-Instruct-3M-0613-Mistral-7B/raw_2024-06-22T01-31-31.647844/results.json +1207 -1207
  4. BAAI/Infinity-Instruct-3M-0613-Mistral-7B/results_2024-06-22T01-31-31.647844.json +6 -6
  5. BAAI/Infinity-Instruct-3M-0625-Mistral-7B/raw_2024-07-19T01-41-09.242433/results.json +1207 -1207
  6. BAAI/Infinity-Instruct-3M-0625-Mistral-7B/results_2024-07-19T01-41-09.242433.json +6 -6
  7. CohereForAI/c4ai-command-r-v01/raw_2024-04-17T00-36-42.568466/results.json +1207 -1207
  8. CohereForAI/c4ai-command-r-v01/results_2024-04-17T00-36-42.568466.json +6 -6
  9. Columbia-NLP/LION-LLaMA-3-8b-odpo-v1.0/raw_2024-07-15T01-32-05.828202/results.json +1207 -1207
  10. Columbia-NLP/LION-LLaMA-3-8b-odpo-v1.0/results_2024-07-15T01-32-05.828202.json +18 -18
  11. CultriX/NeuralMona_MoE-4x7B/raw_2024-05-26T13-29-26.736769/results.json +1207 -1207
  12. CultriX/NeuralMona_MoE-4x7B/results_2024-05-26T13-29-26.736769.json +6 -6
  13. Danielbrdz/Barcenas-14b-Phi-3-medium-ORPO/raw_2024-08-08T02-43-35.640819/results.json +1207 -1207
  14. Danielbrdz/Barcenas-14b-Phi-3-medium-ORPO/results_2024-08-08T02-43-35.640819.json +6 -6
  15. Danielbrdz/Barcenas-Llama3-8b-ORPO/raw_2024-05-18T00-12-52.690138/results.json +1207 -1207
  16. Danielbrdz/Barcenas-Llama3-8b-ORPO/results_2024-05-18T00-12-52.690138.json +6 -6
  17. DeepMount00/Llama-3-8b-Ita/raw_2024-05-19T23-04-56.757278/results.json +1207 -1207
  18. DeepMount00/Llama-3-8b-Ita/results_2024-05-19T23-04-56.757278.json +6 -6
  19. EleutherAI/pythia-14m/raw_2024-04-03T19-47-56.339960/results.json +1287 -1287
  20. EleutherAI/pythia-14m/results_2024-04-03T19-47-56.339960.json +18 -18
  21. EleutherAI/pythia-70m-deduped/raw_2024-04-03T21-10-06.848681/results.json +1287 -1287
  22. EleutherAI/pythia-70m-deduped/results_2024-04-03T21-10-06.848681.json +22 -22
  23. EleutherAI/pythia-70m/raw_2024-04-24T21-25-37.361813/results.json +1207 -1207
  24. EleutherAI/pythia-70m/results_2024-04-24T21-25-37.361813.json +6 -6
  25. FuseAI/FuseChat-7B-VaRM/raw_2024-03-08T15-26-39.517660/results.json +1287 -1287
  26. FuseAI/FuseChat-7B-VaRM/results_2024-03-08T15-26-39.517660.json +6 -6
  27. GritLM/GritLM-7B-KTO/raw_2024-06-15T00-03-38.180499/results.json +1207 -1207
  28. GritLM/GritLM-7B-KTO/results_2024-06-15T00-03-38.180499.json +6 -6
  29. GritLM/GritLM-7B/raw_2024-06-12T20-31-09.833902/results.json +1207 -1207
  30. GritLM/GritLM-7B/results_2024-06-12T20-31-09.833902.json +6 -6
  31. HuggingFaceH4/zephyr-7b-beta/raw_2024-02-21T23-57-52.146406/results.json +1287 -1287
  32. HuggingFaceH4/zephyr-7b-beta/results_2024-02-21T23-57-52.146406.json +6 -6
  33. HuggingFaceTB/SmolLM-1.7B-Instruct/raw_2024-07-29T01-29-23.830440/results.json +1207 -1207
  34. HuggingFaceTB/SmolLM-1.7B-Instruct/results_2024-07-29T01-29-23.830440.json +22 -22
  35. HuggingFaceTB/SmolLM-135M-Instruct/raw_2024-07-24T14-22-33.098781/results.json +1207 -1207
  36. HuggingFaceTB/SmolLM-135M-Instruct/results_2024-07-24T14-22-33.098781.json +22 -22
  37. HuggingFaceTB/SmolLM-360M-Instruct/raw_2024-07-24T17-08-31.061263/results.json +1207 -1207
  38. HuggingFaceTB/SmolLM-360M-Instruct/results_2024-07-24T17-08-31.061263.json +18 -18
  39. Intel/neural-chat-7b-v3-1/raw_2024-02-25T06-21-33.008420/results.json +1287 -1287
  40. Intel/neural-chat-7b-v3-1/results_2024-02-25T06-21-33.008420.json +6 -6
  41. Intel/neural-chat-7b-v3-3/raw_2024-02-21T22-54-50.520595/results.json +1287 -1287
  42. Intel/neural-chat-7b-v3-3/results_2024-02-21T22-54-50.520595.json +6 -6
  43. JJhooww/Mistral_Relora_Step2k/raw_2024-03-09T08-42-21.029909/results.json +1287 -1287
  44. JJhooww/Mistral_Relora_Step2k/raw_2024-04-20T03-09-26.234801/results.json +1207 -1207
  45. JJhooww/Mistral_Relora_Step2k/results_2024-03-09T08-42-21.029909.json +6 -6
  46. JJhooww/Mistral_Relora_Step2k/results_2024-04-20T03-09-26.234801.json +6 -6
  47. Kquant03/CognitiveFusion2-4x7B-BF16/raw_2024-05-19T01-32-18.922295/results.json +1207 -1207
  48. Kquant03/CognitiveFusion2-4x7B-BF16/results_2024-05-19T01-32-18.922295.json +6 -6
  49. Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5/raw_2024-07-28T01-32-22.165106/results.json +1207 -1207
  50. Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5/results_2024-07-28T01-32-22.165106.json +6 -6
152334H/miqu-1-70b-sf/raw_2024-05-23T11-20-45.843993/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.9309631191550011,
5
- "acc,all": 0.9309640522875817,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.6972270341129023,
10
- "mse,all": 1.0077399689542483,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.6481223922114048,
15
- "acc,exam_id__UNICAMP_2023": 0.6511627906976745,
16
- "acc,exam_id__UNICAMP_2019": 0.66,
17
- "acc,exam_id__USP_2022": 0.5918367346938775,
18
- "acc,exam_id__UNICAMP_2020": 0.6909090909090909,
19
- "acc,exam_id__UNICAMP_2022": 0.6410256410256411,
20
- "acc,exam_id__USP_2018": 0.5555555555555556,
21
- "acc,exam_id__UNICAMP_2021_2": 0.6862745098039216,
22
- "acc,exam_id__USP_2019": 0.575,
23
- "acc,exam_id__USP_2021": 0.5769230769230769,
24
- "acc,exam_id__USP_2024": 0.8048780487804879,
25
- "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131,
26
- "acc,exam_id__USP_2020": 0.6785714285714286,
27
- "acc,exam_id__UNICAMP_2024": 0.6666666666666666,
28
- "acc,exam_id__UNICAMP_2018": 0.6111111111111112,
29
- "acc,exam_id__USP_2023": 0.75,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.7466759972008398,
35
- "acc,exam_id__2016": 0.7272727272727273,
36
- "acc,exam_id__2022": 0.6917293233082706,
37
- "acc,exam_id__2009": 0.7217391304347827,
38
- "acc,exam_id__2012": 0.75,
39
- "acc,exam_id__2014": 0.7706422018348624,
40
- "acc,exam_id__2017": 0.7586206896551724,
41
- "acc,exam_id__2016_2": 0.7235772357723578,
42
- "acc,exam_id__2010": 0.7948717948717948,
43
- "acc,exam_id__2011": 0.811965811965812,
44
- "acc,exam_id__2013": 0.6944444444444444,
45
- "acc,exam_id__2015": 0.7310924369747899,
46
- "acc,exam_id__2023": 0.7851851851851852
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.7641750093536355,
50
- "acc,all": 0.803076923076923,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.8382584367896485,
56
- "acc,all": 0.8414285714285714
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.5398633257403189,
60
- "acc,exam_id__2012-09": 0.5454545454545454,
61
- "acc,exam_id__2012-06a": 0.5625,
62
- "acc,exam_id__2014-14": 0.5625,
63
- "acc,exam_id__2014-13": 0.4625,
64
- "acc,exam_id__2010-01": 0.36470588235294116,
65
- "acc,exam_id__2017-22": 0.6125,
66
- "acc,exam_id__2011-04": 0.5,
67
- "acc,exam_id__2017-23": 0.4625,
68
- "acc,exam_id__2012-08": 0.5625,
69
- "acc,exam_id__2015-16": 0.525,
70
- "acc,exam_id__2011-03": 0.41414141414141414,
71
- "acc,exam_id__2014-15": 0.6538461538461539,
72
- "acc,exam_id__2016-20a": 0.525,
73
- "acc,exam_id__2016-21": 0.4375,
74
- "acc,exam_id__2017-24": 0.5375,
75
- "acc,exam_id__2013-11": 0.575,
76
- "acc,exam_id__2013-12": 0.65,
77
- "acc,exam_id__2011-05": 0.525,
78
- "acc,exam_id__2010-02": 0.59,
79
- "acc,exam_id__2012-07": 0.5,
80
- "acc,exam_id__2013-10": 0.6,
81
- "acc,exam_id__2016-20": 0.5375,
82
- "acc,exam_id__2015-18": 0.6125,
83
- "acc,exam_id__2016-19": 0.5512820512820513,
84
- "acc,exam_id__2015-17": 0.6923076923076923,
85
- "acc,exam_id__2018-25": 0.5,
86
- "acc,exam_id__2012-06": 0.55,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.7336708394698086,
92
- "acc,all": 0.7602820211515864
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.5368519669911656,
96
- "acc,all": 0.7417910447761195,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
208
  },
209
- {
210
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7f660e5ba200>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7f660e5b9bc0>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
 
 
414
  },
415
- {
416
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f660e5b9e40>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7f660e5ba3e0>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
 
 
503
  },
504
- {
505
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f660e5ba660>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
624
  },
625
- {
626
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7f660e5b9580>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
 
 
 
839
  },
840
- {
841
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7f660e5b9800>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
963
  },
964
- {
965
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
  },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
- },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
  },
1028
- {
1029
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1030
  }
1031
- ]
1032
  }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
- }
1038
- }
1039
- },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
1050
- },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
1061
- },
1062
- "model_meta": {
1063
- "truncated": 1,
1064
- "non_truncated": 14149,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 1,
1068
- "has_chat_template": true,
1069
- "chat_type": "user_assistant",
1070
- "n_gpus": 2,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "1dca4cce36f01f2104ee2e6b97bac6ff7bb300c1",
1073
- "model_dtype": "torch.float16",
1074
- "model_memory_footprint": 137953316864,
1075
- "model_num_parameters": 68976648192,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 1,
1081
- "max_length": 2560,
1082
- "max_ctx_length": 2528,
1083
- "max_gen_toks": 32
1084
- },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1385.9889705882354,
1094
- "min_seq_length": 1363,
1095
- "max_seq_length": 1452,
1096
- "max_ctx_length": 2528,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1609.9889705882354,
1109
- "min_seq_length": 1587,
1110
- "max_seq_length": 1676,
1111
- "max_ctx_length": 2528,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
  },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 0,
1119
- "non_truncated": 719,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 0,
1123
- "mean_seq_length": 1693.7426981919332,
1124
- "min_seq_length": 1327,
1125
- "max_seq_length": 2453,
1126
- "max_ctx_length": 2528,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 3.0
1130
  },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 1,
1134
- "non_truncated": 1428,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 1,
1138
- "mean_seq_length": 1572.9881035689293,
1139
- "min_seq_length": 1320,
1140
- "max_seq_length": 2612,
1141
- "max_ctx_length": 2528,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 2.9993002099370187
1145
  },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1608.1184615384616,
1154
- "min_seq_length": 1556,
1155
- "max_seq_length": 1715,
1156
- "max_ctx_length": 2528,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
 
 
 
 
 
 
 
 
1160
  },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1425.9178571428572,
1169
- "min_seq_length": 1402,
1170
- "max_seq_length": 1672,
1171
- "max_ctx_length": 2528,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 0,
1179
- "non_truncated": 2195,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 0,
1183
- "mean_seq_length": 1345.4145785876992,
1184
- "min_seq_length": 1090,
1185
- "max_seq_length": 1827,
1186
- "max_ctx_length": 2528,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 3.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1190
  },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 0,
1194
- "non_truncated": 851,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 0,
1198
- "mean_seq_length": 1917.801410105758,
1199
- "min_seq_length": 1883,
1200
- "max_seq_length": 1961,
1201
- "max_ctx_length": 2528,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 25.0
 
 
 
 
 
 
1205
  },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1692.6845771144278,
1214
- "min_seq_length": 1671,
1215
- "max_seq_length": 1810,
1216
- "max_ctx_length": 2528,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
- },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=152334H/miqu-1-70b-sf,dtype=float16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
- },
1243
- "git_hash": "51e0e5e"
1244
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.9309631191550011,
5
+ "acc,all": 0.9309640522875817,
6
+ "alias": "assin2_rte"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.6972270341129023,
10
+ "mse,all": 1.0077399689542483,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.6481223922114048,
15
+ "acc,exam_id__UNICAMP_2023": 0.6511627906976745,
16
+ "acc,exam_id__UNICAMP_2019": 0.66,
17
+ "acc,exam_id__USP_2022": 0.5918367346938775,
18
+ "acc,exam_id__UNICAMP_2020": 0.6909090909090909,
19
+ "acc,exam_id__UNICAMP_2022": 0.6410256410256411,
20
+ "acc,exam_id__USP_2018": 0.5555555555555556,
21
+ "acc,exam_id__UNICAMP_2021_2": 0.6862745098039216,
22
+ "acc,exam_id__USP_2019": 0.575,
23
+ "acc,exam_id__USP_2021": 0.5769230769230769,
24
+ "acc,exam_id__USP_2024": 0.8048780487804879,
25
+ "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131,
26
+ "acc,exam_id__USP_2020": 0.6785714285714286,
27
+ "acc,exam_id__UNICAMP_2024": 0.6666666666666666,
28
+ "acc,exam_id__UNICAMP_2018": 0.6111111111111112,
29
+ "acc,exam_id__USP_2023": 0.75,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.7466759972008398,
35
+ "acc,exam_id__2016": 0.7272727272727273,
36
+ "acc,exam_id__2022": 0.6917293233082706,
37
+ "acc,exam_id__2009": 0.7217391304347827,
38
+ "acc,exam_id__2012": 0.75,
39
+ "acc,exam_id__2014": 0.7706422018348624,
40
+ "acc,exam_id__2017": 0.7586206896551724,
41
+ "acc,exam_id__2016_2": 0.7235772357723578,
42
+ "acc,exam_id__2010": 0.7948717948717948,
43
+ "acc,exam_id__2011": 0.811965811965812,
44
+ "acc,exam_id__2013": 0.6944444444444444,
45
+ "acc,exam_id__2015": 0.7310924369747899,
46
+ "acc,exam_id__2023": 0.7851851851851852
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.7641750093536355,
50
+ "acc,all": 0.803076923076923,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.8382584367896485,
56
+ "acc,all": 0.8414285714285714
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.5398633257403189,
60
+ "acc,exam_id__2012-09": 0.5454545454545454,
61
+ "acc,exam_id__2012-06a": 0.5625,
62
+ "acc,exam_id__2014-14": 0.5625,
63
+ "acc,exam_id__2014-13": 0.4625,
64
+ "acc,exam_id__2010-01": 0.36470588235294116,
65
+ "acc,exam_id__2017-22": 0.6125,
66
+ "acc,exam_id__2011-04": 0.5,
67
+ "acc,exam_id__2017-23": 0.4625,
68
+ "acc,exam_id__2012-08": 0.5625,
69
+ "acc,exam_id__2015-16": 0.525,
70
+ "acc,exam_id__2011-03": 0.41414141414141414,
71
+ "acc,exam_id__2014-15": 0.6538461538461539,
72
+ "acc,exam_id__2016-20a": 0.525,
73
+ "acc,exam_id__2016-21": 0.4375,
74
+ "acc,exam_id__2017-24": 0.5375,
75
+ "acc,exam_id__2013-11": 0.575,
76
+ "acc,exam_id__2013-12": 0.65,
77
+ "acc,exam_id__2011-05": 0.525,
78
+ "acc,exam_id__2010-02": 0.59,
79
+ "acc,exam_id__2012-07": 0.5,
80
+ "acc,exam_id__2013-10": 0.6,
81
+ "acc,exam_id__2016-20": 0.5375,
82
+ "acc,exam_id__2015-18": 0.6125,
83
+ "acc,exam_id__2016-19": 0.5512820512820513,
84
+ "acc,exam_id__2015-17": 0.6923076923076923,
85
+ "acc,exam_id__2018-25": 0.5,
86
+ "acc,exam_id__2012-06": 0.55,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.7336708394698086,
92
+ "acc,all": 0.7602820211515864
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.7158026226548874,
96
+ "acc,all": 0.7417910447761195,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7f660e5ba200>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7f660e5b9bc0>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f660e5b9e40>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7f660e5ba3e0>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f660e5ba660>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7f660e5b9580>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7f660e5b9800>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
990
  },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
+ },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
+ },
1028
+ {
1029
+ "function": "take_first"
1030
+ }
1031
+ ]
1032
+ }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
  }
 
1038
  }
1039
  },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
1050
  },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
1061
  },
1062
+ "model_meta": {
1063
+ "truncated": 1,
1064
+ "non_truncated": 14149,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 1,
1068
+ "has_chat_template": true,
1069
+ "chat_type": "user_assistant",
1070
+ "n_gpus": 2,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "1dca4cce36f01f2104ee2e6b97bac6ff7bb300c1",
1073
+ "model_dtype": "torch.float16",
1074
+ "model_memory_footprint": 137953316864,
1075
+ "model_num_parameters": 68976648192,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 1,
1081
+ "max_length": 2560,
1082
+ "max_ctx_length": 2528,
1083
+ "max_gen_toks": 32
1084
  },
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1385.9889705882354,
1094
+ "min_seq_length": 1363,
1095
+ "max_seq_length": 1452,
1096
+ "max_ctx_length": 2528,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
+ },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1609.9889705882354,
1109
+ "min_seq_length": 1587,
1110
+ "max_seq_length": 1676,
1111
+ "max_ctx_length": 2528,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
+ },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 0,
1119
+ "non_truncated": 719,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 0,
1123
+ "mean_seq_length": 1693.7426981919332,
1124
+ "min_seq_length": 1327,
1125
+ "max_seq_length": 2453,
1126
+ "max_ctx_length": 2528,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 3.0
1130
+ },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 1,
1134
+ "non_truncated": 1428,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 1,
1138
+ "mean_seq_length": 1572.9881035689293,
1139
+ "min_seq_length": 1320,
1140
+ "max_seq_length": 2612,
1141
+ "max_ctx_length": 2528,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 2.9993002099370187
1145
+ },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1608.1184615384616,
1154
+ "min_seq_length": 1556,
1155
+ "max_seq_length": 1715,
1156
+ "max_ctx_length": 2528,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1425.9178571428572,
1169
+ "min_seq_length": 1402,
1170
+ "max_seq_length": 1672,
1171
+ "max_ctx_length": 2528,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 0,
1179
+ "non_truncated": 2195,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 0,
1183
+ "mean_seq_length": 1345.4145785876992,
1184
+ "min_seq_length": 1090,
1185
+ "max_seq_length": 1827,
1186
+ "max_ctx_length": 2528,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 3.0
1190
+ },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 0,
1194
+ "non_truncated": 851,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 0,
1198
+ "mean_seq_length": 1917.801410105758,
1199
+ "min_seq_length": 1883,
1200
+ "max_seq_length": 1961,
1201
+ "max_ctx_length": 2528,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 25.0
1205
+ },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1692.6845771144278,
1214
+ "min_seq_length": 1671,
1215
+ "max_seq_length": 1810,
1216
+ "max_ctx_length": 2528,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
  },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=152334H/miqu-1-70b-sf,dtype=float16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
  },
1243
+ "git_hash": "51e0e5e"
1244
  }
152334H/miqu-1-70b-sf/results_2024-05-23T11-20-45.843993.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.7150897912249694,
38
- "all_grouped_npm": 0.5797294824942764,
39
  "all_grouped": {
40
  "enem_challenge": 0.7466759972008398,
41
  "bluex": 0.6481223922114048,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.7641750093536355,
46
  "hatebr_offensive": 0.8382584367896485,
47
  "portuguese_hate_speech": 0.7336708394698086,
48
- "tweetsentbr": 0.5368519669911656
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.7466759972008398,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7641750093536355,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8382584367896485,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7336708394698086,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.5368519669911656
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.7466759972008398,
@@ -150,9 +150,9 @@
150
  "main_score": 0.7336708394698086
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.5368519669911656,
154
  "acc,all": 0.7417910447761195,
155
- "main_score": 0.5368519669911656
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.7349731974098275,
38
+ "all_grouped_npm": 0.6093178845550768,
39
  "all_grouped": {
40
  "enem_challenge": 0.7466759972008398,
41
  "bluex": 0.6481223922114048,
 
45
  "faquad_nli": 0.7641750093536355,
46
  "hatebr_offensive": 0.8382584367896485,
47
  "portuguese_hate_speech": 0.7336708394698086,
48
+ "tweetsentbr": 0.7158026226548874
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.7466759972008398,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7641750093536355,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8382584367896485,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7336708394698086,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.7158026226548874
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.7466759972008398,
 
150
  "main_score": 0.7336708394698086
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.7158026226548874,
154
  "acc,all": 0.7417910447761195,
155
+ "main_score": 0.7158026226548874
156
  }
157
  },
158
  "config_tasks": {
BAAI/Infinity-Instruct-3M-0613-Mistral-7B/raw_2024-06-22T01-31-31.647844/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.9174712657490132,
5
- "acc,all": 0.9174836601307189,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.7632047672731808,
10
- "mse,all": 0.5285130718954247,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.5326842837273992,
15
- "acc,exam_id__USP_2022": 0.46938775510204084,
16
- "acc,exam_id__UNICAMP_2018": 0.35185185185185186,
17
- "acc,exam_id__USP_2021": 0.5576923076923077,
18
- "acc,exam_id__UNICAMP_2024": 0.4666666666666667,
19
- "acc,exam_id__USP_2018": 0.46296296296296297,
20
- "acc,exam_id__USP_2024": 0.7317073170731707,
21
- "acc,exam_id__USP_2019": 0.5,
22
- "acc,exam_id__UNICAMP_2019": 0.5,
23
- "acc,exam_id__UNICAMP_2021_1": 0.5,
24
- "acc,exam_id__UNICAMP_2021_2": 0.39215686274509803,
25
- "acc,exam_id__UNICAMP_2023": 0.6976744186046512,
26
- "acc,exam_id__UNICAMP_2020": 0.6363636363636364,
27
- "acc,exam_id__USP_2023": 0.6818181818181818,
28
- "acc,exam_id__USP_2020": 0.5535714285714286,
29
- "acc,exam_id__UNICAMP_2022": 0.5641025641025641,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.6466060181945417,
35
- "acc,exam_id__2017": 0.6206896551724138,
36
- "acc,exam_id__2012": 0.6206896551724138,
37
- "acc,exam_id__2022": 0.6616541353383458,
38
- "acc,exam_id__2016": 0.6446280991735537,
39
- "acc,exam_id__2010": 0.6153846153846154,
40
- "acc,exam_id__2011": 0.7094017094017094,
41
- "acc,exam_id__2014": 0.6513761467889908,
42
- "acc,exam_id__2013": 0.6944444444444444,
43
- "acc,exam_id__2016_2": 0.6585365853658537,
44
- "acc,exam_id__2015": 0.6134453781512605,
45
- "acc,exam_id__2009": 0.5826086956521739,
46
- "acc,exam_id__2023": 0.6814814814814815
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.8241841468197617,
50
- "acc,all": 0.8769230769230769,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.7990490978163615,
56
- "acc,all": 0.8042857142857143
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.44510250569476084,
60
- "acc,exam_id__2013-11": 0.4625,
61
- "acc,exam_id__2016-21": 0.4875,
62
- "acc,exam_id__2012-06": 0.4625,
63
- "acc,exam_id__2013-10": 0.4375,
64
- "acc,exam_id__2012-09": 0.33766233766233766,
65
- "acc,exam_id__2012-08": 0.475,
66
- "acc,exam_id__2015-18": 0.4875,
67
- "acc,exam_id__2017-24": 0.5125,
68
- "acc,exam_id__2017-22": 0.4875,
69
- "acc,exam_id__2016-19": 0.5512820512820513,
70
- "acc,exam_id__2010-02": 0.52,
71
- "acc,exam_id__2012-07": 0.3875,
72
- "acc,exam_id__2017-23": 0.4,
73
- "acc,exam_id__2014-15": 0.41025641025641024,
74
- "acc,exam_id__2011-05": 0.475,
75
- "acc,exam_id__2015-16": 0.4375,
76
- "acc,exam_id__2016-20a": 0.4,
77
- "acc,exam_id__2014-14": 0.4875,
78
- "acc,exam_id__2018-25": 0.425,
79
- "acc,exam_id__2010-01": 0.3411764705882353,
80
- "acc,exam_id__2014-13": 0.4,
81
- "acc,exam_id__2013-12": 0.475,
82
- "acc,exam_id__2016-20": 0.4125,
83
- "acc,exam_id__2012-06a": 0.4875,
84
- "acc,exam_id__2011-03": 0.35353535353535354,
85
- "acc,exam_id__2011-04": 0.3625,
86
- "acc,exam_id__2015-17": 0.5512820512820513,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.7141208181486736,
92
- "acc,all": 0.7520564042303173
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.49998821485827083,
96
- "acc,all": 0.7079601990049751,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
 
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
208
  },
209
- {
210
- "function": "take_first"
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7f8c200b76a0>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7f8c200b7060>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
414
  },
415
- {
416
- "function": "take_first"
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f8c200b72e0>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7f8c200b7880>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
503
  },
504
- {
505
- "function": "take_first"
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f8c200b7b00>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
624
  },
625
- {
626
- "function": "take_first"
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7f8c200b6a20>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
839
  },
840
- {
841
- "function": "take_first"
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7f8c200b6ca0>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
963
  },
964
- {
965
- "function": "take_first"
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
  },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
- },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
  },
1028
- {
1029
- "function": "take_first"
1030
  }
1031
- ]
1032
  }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
- }
1038
- }
1039
- },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
1050
- },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
1061
- },
1062
- "model_meta": {
1063
- "truncated": 4,
1064
- "non_truncated": 14146,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 4,
1068
- "has_chat_template": true,
1069
- "chat_type": "system_user_assistant",
1070
- "n_gpus": 1,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "c1bec5cb82a7a561d8c7459cc9f5685f938c7f34",
1073
- "model_dtype": "torch.float16",
1074
- "model_memory_footprint": 15020376064,
1075
- "model_num_parameters": 7241748480,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 32,
1081
- "max_length": 2560,
1082
- "max_ctx_length": 2528,
1083
- "max_gen_toks": 32
1084
- },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1531.7455065359477,
1094
- "min_seq_length": 1508,
1095
- "max_seq_length": 1598,
1096
- "max_ctx_length": 2528,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1740.7455065359477,
1109
- "min_seq_length": 1717,
1110
- "max_seq_length": 1807,
1111
- "max_ctx_length": 2528,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
  },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 2,
1119
- "non_truncated": 717,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 2,
1123
- "mean_seq_length": 1761.9262865090404,
1124
- "min_seq_length": 1385,
1125
- "max_seq_length": 2562,
1126
- "max_ctx_length": 2528,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 2.9972183588317107
1130
  },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 2,
1134
- "non_truncated": 1427,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 2,
1138
- "mean_seq_length": 1662.039188243527,
1139
- "min_seq_length": 1396,
1140
- "max_seq_length": 2660,
1141
- "max_ctx_length": 2528,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 2.998600419874038
1145
  },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1756.9876923076922,
1154
- "min_seq_length": 1701,
1155
- "max_seq_length": 1877,
1156
- "max_ctx_length": 2528,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
1160
  },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1567.3878571428572,
1169
- "min_seq_length": 1544,
1170
- "max_seq_length": 1818,
1171
- "max_ctx_length": 2528,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 0,
1179
- "non_truncated": 2195,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 0,
1183
- "mean_seq_length": 1407.764464692483,
1184
- "min_seq_length": 1141,
1185
- "max_seq_length": 1910,
1186
- "max_ctx_length": 2528,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 3.0
1190
  },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 0,
1194
- "non_truncated": 851,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 0,
1198
- "mean_seq_length": 2068.3360752056406,
1199
- "min_seq_length": 2033,
1200
- "max_seq_length": 2107,
1201
- "max_ctx_length": 2528,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 25.0
1205
  },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1814.2492537313433,
1214
- "min_seq_length": 1793,
1215
- "max_seq_length": 1909,
1216
- "max_ctx_length": 2528,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
- },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=BAAI/Infinity-Instruct-3M-0613-Mistral-7B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
- },
1243
- "git_hash": "5a13f3e"
1244
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.9174712657490132,
5
+ "acc,all": 0.9174836601307189,
6
+ "alias": "assin2_rte"
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.7632047672731808,
10
+ "mse,all": 0.5285130718954247,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.5326842837273992,
15
+ "acc,exam_id__USP_2022": 0.46938775510204084,
16
+ "acc,exam_id__UNICAMP_2018": 0.35185185185185186,
17
+ "acc,exam_id__USP_2021": 0.5576923076923077,
18
+ "acc,exam_id__UNICAMP_2024": 0.4666666666666667,
19
+ "acc,exam_id__USP_2018": 0.46296296296296297,
20
+ "acc,exam_id__USP_2024": 0.7317073170731707,
21
+ "acc,exam_id__USP_2019": 0.5,
22
+ "acc,exam_id__UNICAMP_2019": 0.5,
23
+ "acc,exam_id__UNICAMP_2021_1": 0.5,
24
+ "acc,exam_id__UNICAMP_2021_2": 0.39215686274509803,
25
+ "acc,exam_id__UNICAMP_2023": 0.6976744186046512,
26
+ "acc,exam_id__UNICAMP_2020": 0.6363636363636364,
27
+ "acc,exam_id__USP_2023": 0.6818181818181818,
28
+ "acc,exam_id__USP_2020": 0.5535714285714286,
29
+ "acc,exam_id__UNICAMP_2022": 0.5641025641025641,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.6466060181945417,
35
+ "acc,exam_id__2017": 0.6206896551724138,
36
+ "acc,exam_id__2012": 0.6206896551724138,
37
+ "acc,exam_id__2022": 0.6616541353383458,
38
+ "acc,exam_id__2016": 0.6446280991735537,
39
+ "acc,exam_id__2010": 0.6153846153846154,
40
+ "acc,exam_id__2011": 0.7094017094017094,
41
+ "acc,exam_id__2014": 0.6513761467889908,
42
+ "acc,exam_id__2013": 0.6944444444444444,
43
+ "acc,exam_id__2016_2": 0.6585365853658537,
44
+ "acc,exam_id__2015": 0.6134453781512605,
45
+ "acc,exam_id__2009": 0.5826086956521739,
46
+ "acc,exam_id__2023": 0.6814814814814815
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.8241841468197617,
50
+ "acc,all": 0.8769230769230769,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.7990490978163615,
56
+ "acc,all": 0.8042857142857143
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.44510250569476084,
60
+ "acc,exam_id__2013-11": 0.4625,
61
+ "acc,exam_id__2016-21": 0.4875,
62
+ "acc,exam_id__2012-06": 0.4625,
63
+ "acc,exam_id__2013-10": 0.4375,
64
+ "acc,exam_id__2012-09": 0.33766233766233766,
65
+ "acc,exam_id__2012-08": 0.475,
66
+ "acc,exam_id__2015-18": 0.4875,
67
+ "acc,exam_id__2017-24": 0.5125,
68
+ "acc,exam_id__2017-22": 0.4875,
69
+ "acc,exam_id__2016-19": 0.5512820512820513,
70
+ "acc,exam_id__2010-02": 0.52,
71
+ "acc,exam_id__2012-07": 0.3875,
72
+ "acc,exam_id__2017-23": 0.4,
73
+ "acc,exam_id__2014-15": 0.41025641025641024,
74
+ "acc,exam_id__2011-05": 0.475,
75
+ "acc,exam_id__2015-16": 0.4375,
76
+ "acc,exam_id__2016-20a": 0.4,
77
+ "acc,exam_id__2014-14": 0.4875,
78
+ "acc,exam_id__2018-25": 0.425,
79
+ "acc,exam_id__2010-01": 0.3411764705882353,
80
+ "acc,exam_id__2014-13": 0.4,
81
+ "acc,exam_id__2013-12": 0.475,
82
+ "acc,exam_id__2016-20": 0.4125,
83
+ "acc,exam_id__2012-06a": 0.4875,
84
+ "acc,exam_id__2011-03": 0.35353535353535354,
85
+ "acc,exam_id__2011-04": 0.3625,
86
+ "acc,exam_id__2015-17": 0.5512820512820513,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.7141208181486736,
92
+ "acc,all": 0.7520564042303173
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.6666509531443612,
96
+ "acc,all": 0.7079601990049751,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7f8c200b76a0>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7f8c200b7060>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f8c200b72e0>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7f8c200b7880>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f8c200b7b00>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7f8c200b6a20>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7f8c200b6ca0>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
990
  },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
+ },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
+ },
1028
+ {
1029
+ "function": "take_first"
1030
+ }
1031
+ ]
1032
+ }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
  }
 
1038
  }
1039
  },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
 
 
 
 
1050
  },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
 
 
 
 
1061
  },
1062
+ "model_meta": {
1063
+ "truncated": 4,
1064
+ "non_truncated": 14146,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 4,
1068
+ "has_chat_template": true,
1069
+ "chat_type": "system_user_assistant",
1070
+ "n_gpus": 1,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "c1bec5cb82a7a561d8c7459cc9f5685f938c7f34",
1073
+ "model_dtype": "torch.float16",
1074
+ "model_memory_footprint": 15020376064,
1075
+ "model_num_parameters": 7241748480,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 32,
1081
+ "max_length": 2560,
1082
+ "max_ctx_length": 2528,
1083
+ "max_gen_toks": 32
1084
  },
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1531.7455065359477,
1094
+ "min_seq_length": 1508,
1095
+ "max_seq_length": 1598,
1096
+ "max_ctx_length": 2528,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
+ },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1740.7455065359477,
1109
+ "min_seq_length": 1717,
1110
+ "max_seq_length": 1807,
1111
+ "max_ctx_length": 2528,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
+ },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 2,
1119
+ "non_truncated": 717,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 2,
1123
+ "mean_seq_length": 1761.9262865090404,
1124
+ "min_seq_length": 1385,
1125
+ "max_seq_length": 2562,
1126
+ "max_ctx_length": 2528,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 2.9972183588317107
1130
+ },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 2,
1134
+ "non_truncated": 1427,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 2,
1138
+ "mean_seq_length": 1662.039188243527,
1139
+ "min_seq_length": 1396,
1140
+ "max_seq_length": 2660,
1141
+ "max_ctx_length": 2528,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 2.998600419874038
1145
+ },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1756.9876923076922,
1154
+ "min_seq_length": 1701,
1155
+ "max_seq_length": 1877,
1156
+ "max_ctx_length": 2528,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1567.3878571428572,
1169
+ "min_seq_length": 1544,
1170
+ "max_seq_length": 1818,
1171
+ "max_ctx_length": 2528,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 0,
1179
+ "non_truncated": 2195,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 0,
1183
+ "mean_seq_length": 1407.764464692483,
1184
+ "min_seq_length": 1141,
1185
+ "max_seq_length": 1910,
1186
+ "max_ctx_length": 2528,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 3.0
1190
+ },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 0,
1194
+ "non_truncated": 851,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 0,
1198
+ "mean_seq_length": 2068.3360752056406,
1199
+ "min_seq_length": 2033,
1200
+ "max_seq_length": 2107,
1201
+ "max_ctx_length": 2528,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 25.0
1205
+ },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1814.2492537313433,
1214
+ "min_seq_length": 1793,
1215
+ "max_seq_length": 1909,
1216
+ "max_ctx_length": 2528,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
  },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=BAAI/Infinity-Instruct-3M-0613-Mistral-7B,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
  },
1243
+ "git_hash": "5a13f3e"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1244
  }
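
The multiple-choice tasks configured above (bluex, enem_challenge, oab_exams) all share the same answer-extraction chain in their filter_list: normalize_spaces, then remove_accents, then find_choices with the regex patterns listed in the config, then take_first, with accuracy grouped by exam_id. As a rough illustration of how such a chain turns a free-form generation into a single letter, here is a minimal Python sketch; the helper implementations (whitespace collapsing, accent stripping) are assumptions for illustration and not the harness's actual code, while the regex patterns and choice list are taken verbatim from the A-E config above.

import re
import unicodedata

# Patterns copied verbatim from the find_choices filter above (A-E variant).
CHOICE_PATTERNS = [
    r"(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\b",
    r"\b([ABCDE])\.",
    r"\b([ABCDE]) ?[.):-]",
    r"\b([ABCDE])$",
    r"\b([ABCDE])\b",
]

def normalize_spaces(text):
    # Assumed behaviour: collapse any run of whitespace into a single space.
    return re.sub(r"\s+", " ", text).strip()

def remove_accents(text):
    # Assumed behaviour: drop combining marks so accented words match the accent-free patterns (e.g. "Opcao").
    return "".join(c for c in unicodedata.normalize("NFKD", text) if not unicodedata.combining(c))

def find_choice(generation):
    # Apply the patterns in order and keep the first capture (the take_first step).
    text = remove_accents(normalize_spaces(generation))
    for pattern in CHOICE_PATTERNS:
        match = re.search(pattern, text)
        if match:
            return match.group(1)
    return None

print(find_choice("Resposta correta: letra C."))  # -> C

The per-exam accuracies reported in the results block (the acc,exam_id__* entries) then come from grouping these extracted answers by the exam_id column, as specified by the group_by setting in each filter.
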
BAAI/Infinity-Instruct-3M-0613-Mistral-7B/results_2024-06-22T01-31-31.647844.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.6824901242535515,
38
- "all_grouped_npm": 0.5328536851326985,
39
  "all_grouped": {
40
  "enem_challenge": 0.6466060181945417,
41
  "bluex": 0.5326842837273992,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.8241841468197617,
46
  "hatebr_offensive": 0.7990490978163615,
47
  "portuguese_hate_speech": 0.7141208181486736,
48
- "tweetsentbr": 0.49998821485827083
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6466060181945417,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.8241841468197617,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7990490978163615,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7141208181486736,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.49998821485827083
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6466060181945417,
@@ -150,9 +150,9 @@
150
  "main_score": 0.7141208181486736
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.49998821485827083,
154
  "acc,all": 0.7079601990049751,
155
- "main_score": 0.49998821485827083
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.7010082062853393,
38
+ "all_grouped_npm": 0.5604103548228588,
39
  "all_grouped": {
40
  "enem_challenge": 0.6466060181945417,
41
  "bluex": 0.5326842837273992,
 
45
  "faquad_nli": 0.8241841468197617,
46
  "hatebr_offensive": 0.7990490978163615,
47
  "portuguese_hate_speech": 0.7141208181486736,
48
+ "tweetsentbr": 0.6666509531443612
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6466060181945417,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.8241841468197617,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7990490978163615,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7141208181486736,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.6666509531443612
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6466060181945417,
 
150
  "main_score": 0.7141208181486736
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.6666509531443612,
154
  "acc,all": 0.7079601990049751,
155
+ "main_score": 0.6666509531443612
156
  }
157
  },
158
  "config_tasks": {
BAAI/Infinity-Instruct-3M-0625-Mistral-7B/raw_2024-07-19T01-41-09.242433/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.9195035639155449,
5
- "acc,all": 0.9195261437908496,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.7748240246646378,
10
- "mse,all": 0.5014828431372549,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.5507649513212796,
15
- "acc,exam_id__UNICAMP_2024": 0.4444444444444444,
16
- "acc,exam_id__USP_2018": 0.5,
17
- "acc,exam_id__USP_2022": 0.5918367346938775,
18
- "acc,exam_id__USP_2019": 0.525,
19
- "acc,exam_id__UNICAMP_2019": 0.5,
20
- "acc,exam_id__USP_2024": 0.7560975609756098,
21
- "acc,exam_id__USP_2023": 0.6363636363636364,
22
- "acc,exam_id__UNICAMP_2022": 0.5384615384615384,
23
- "acc,exam_id__USP_2021": 0.5769230769230769,
24
- "acc,exam_id__UNICAMP_2020": 0.5454545454545454,
25
- "acc,exam_id__USP_2020": 0.5535714285714286,
26
- "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253,
27
- "acc,exam_id__UNICAMP_2018": 0.5,
28
- "acc,exam_id__UNICAMP_2023": 0.6744186046511628,
29
- "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.6634009797060881,
35
- "acc,exam_id__2022": 0.6766917293233082,
36
- "acc,exam_id__2013": 0.7129629629629629,
37
- "acc,exam_id__2012": 0.6551724137931034,
38
- "acc,exam_id__2010": 0.6581196581196581,
39
- "acc,exam_id__2016_2": 0.6422764227642277,
40
- "acc,exam_id__2023": 0.6814814814814815,
41
- "acc,exam_id__2016": 0.6528925619834711,
42
- "acc,exam_id__2009": 0.6086956521739131,
43
- "acc,exam_id__2014": 0.6697247706422018,
44
- "acc,exam_id__2017": 0.6637931034482759,
45
- "acc,exam_id__2011": 0.7008547008547008,
46
- "acc,exam_id__2015": 0.6386554621848739
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.8110585067106806,
50
- "acc,all": 0.8661538461538462,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.8052263390689189,
56
- "acc,all": 0.8107142857142857
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.44419134396355353,
60
- "acc,exam_id__2014-13": 0.425,
61
- "acc,exam_id__2012-06": 0.45,
62
- "acc,exam_id__2018-25": 0.4375,
63
- "acc,exam_id__2012-09": 0.3246753246753247,
64
- "acc,exam_id__2012-07": 0.375,
65
- "acc,exam_id__2016-20a": 0.35,
66
- "acc,exam_id__2011-05": 0.5375,
67
- "acc,exam_id__2017-24": 0.45,
68
- "acc,exam_id__2016-21": 0.4875,
69
- "acc,exam_id__2013-10": 0.4625,
70
- "acc,exam_id__2013-11": 0.4625,
71
- "acc,exam_id__2017-22": 0.475,
72
- "acc,exam_id__2010-01": 0.35294117647058826,
73
- "acc,exam_id__2016-20": 0.45,
74
- "acc,exam_id__2017-23": 0.4125,
75
- "acc,exam_id__2012-08": 0.475,
76
- "acc,exam_id__2014-14": 0.5375,
77
- "acc,exam_id__2010-02": 0.49,
78
- "acc,exam_id__2015-16": 0.375,
79
- "acc,exam_id__2015-18": 0.5,
80
- "acc,exam_id__2015-17": 0.5897435897435898,
81
- "acc,exam_id__2011-03": 0.32323232323232326,
82
- "acc,exam_id__2013-12": 0.5,
83
- "acc,exam_id__2011-04": 0.3375,
84
- "acc,exam_id__2016-19": 0.5,
85
- "acc,exam_id__2014-15": 0.44871794871794873,
86
- "acc,exam_id__2012-06a": 0.4875,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.7276695425104325,
92
- "acc,all": 0.7649823736780259
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.5138720675986127,
96
- "acc,all": 0.7189054726368159,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
208
  },
209
- {
210
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7f6c7c73ab60>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7f6c7c73a520>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
 
 
414
  },
415
- {
416
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f6c7c73a7a0>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7f6c7c73ad40>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
 
 
503
  },
504
- {
505
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f6c7c73afc0>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
624
  },
625
- {
626
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7f6c7c739ee0>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
 
 
 
839
  },
840
- {
841
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7f6c7c73a160>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
963
  },
964
- {
965
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
  },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
- },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
  },
1028
- {
1029
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1030
  }
1031
- ]
1032
  }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
- }
1038
- }
1039
- },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
1050
- },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
1061
- },
1062
- "model_meta": {
1063
- "truncated": 4,
1064
- "non_truncated": 14146,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 4,
1068
- "has_chat_template": true,
1069
- "chat_type": "system_user_assistant",
1070
- "n_gpus": 1,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "302e3ae0bcc50dae3fb69fc1b08b518398e8c407",
1073
- "model_dtype": "torch.bfloat16",
1074
- "model_memory_footprint": 14483505152,
1075
- "model_num_parameters": 7241748480,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 32,
1081
- "max_length": 2560,
1082
- "max_ctx_length": 2528,
1083
- "max_gen_toks": 32
1084
- },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1531.7455065359477,
1094
- "min_seq_length": 1508,
1095
- "max_seq_length": 1598,
1096
- "max_ctx_length": 2528,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1740.7455065359477,
1109
- "min_seq_length": 1717,
1110
- "max_seq_length": 1807,
1111
- "max_ctx_length": 2528,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
  },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 2,
1119
- "non_truncated": 717,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 2,
1123
- "mean_seq_length": 1761.9262865090404,
1124
- "min_seq_length": 1385,
1125
- "max_seq_length": 2562,
1126
- "max_ctx_length": 2528,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 2.9972183588317107
1130
  },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 2,
1134
- "non_truncated": 1427,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 2,
1138
- "mean_seq_length": 1662.039188243527,
1139
- "min_seq_length": 1396,
1140
- "max_seq_length": 2660,
1141
- "max_ctx_length": 2528,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 2.998600419874038
1145
  },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1756.9876923076922,
1154
- "min_seq_length": 1701,
1155
- "max_seq_length": 1877,
1156
- "max_ctx_length": 2528,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
 
 
 
 
 
 
 
 
1160
  },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1567.3878571428572,
1169
- "min_seq_length": 1544,
1170
- "max_seq_length": 1818,
1171
- "max_ctx_length": 2528,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 0,
1179
- "non_truncated": 2195,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 0,
1183
- "mean_seq_length": 1407.764464692483,
1184
- "min_seq_length": 1141,
1185
- "max_seq_length": 1910,
1186
- "max_ctx_length": 2528,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 3.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1190
  },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 0,
1194
- "non_truncated": 851,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 0,
1198
- "mean_seq_length": 2068.3360752056406,
1199
- "min_seq_length": 2033,
1200
- "max_seq_length": 2107,
1201
- "max_ctx_length": 2528,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 25.0
 
 
 
 
 
 
1205
  },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1814.2492537313433,
1214
- "min_seq_length": 1793,
1215
- "max_seq_length": 1909,
1216
- "max_ctx_length": 2528,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
- },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=BAAI/Infinity-Instruct-3M-0625-Mistral-7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
- },
1243
- "git_hash": "5a13f3e"
1244
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.9195035639155449,
5
+ "acc,all": 0.9195261437908496,
6
+ "alias": "assin2_rte"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.7748240246646378,
10
+ "mse,all": 0.5014828431372549,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.5507649513212796,
15
+ "acc,exam_id__UNICAMP_2024": 0.4444444444444444,
16
+ "acc,exam_id__USP_2018": 0.5,
17
+ "acc,exam_id__USP_2022": 0.5918367346938775,
18
+ "acc,exam_id__USP_2019": 0.525,
19
+ "acc,exam_id__UNICAMP_2019": 0.5,
20
+ "acc,exam_id__USP_2024": 0.7560975609756098,
21
+ "acc,exam_id__USP_2023": 0.6363636363636364,
22
+ "acc,exam_id__UNICAMP_2022": 0.5384615384615384,
23
+ "acc,exam_id__USP_2021": 0.5769230769230769,
24
+ "acc,exam_id__UNICAMP_2020": 0.5454545454545454,
25
+ "acc,exam_id__USP_2020": 0.5535714285714286,
26
+ "acc,exam_id__UNICAMP_2021_2": 0.49019607843137253,
27
+ "acc,exam_id__UNICAMP_2018": 0.5,
28
+ "acc,exam_id__UNICAMP_2023": 0.6744186046511628,
29
+ "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.6634009797060881,
35
+ "acc,exam_id__2022": 0.6766917293233082,
36
+ "acc,exam_id__2013": 0.7129629629629629,
37
+ "acc,exam_id__2012": 0.6551724137931034,
38
+ "acc,exam_id__2010": 0.6581196581196581,
39
+ "acc,exam_id__2016_2": 0.6422764227642277,
40
+ "acc,exam_id__2023": 0.6814814814814815,
41
+ "acc,exam_id__2016": 0.6528925619834711,
42
+ "acc,exam_id__2009": 0.6086956521739131,
43
+ "acc,exam_id__2014": 0.6697247706422018,
44
+ "acc,exam_id__2017": 0.6637931034482759,
45
+ "acc,exam_id__2011": 0.7008547008547008,
46
+ "acc,exam_id__2015": 0.6386554621848739
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.8110585067106806,
50
+ "acc,all": 0.8661538461538462,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.8052263390689189,
56
+ "acc,all": 0.8107142857142857
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.44419134396355353,
60
+ "acc,exam_id__2014-13": 0.425,
61
+ "acc,exam_id__2012-06": 0.45,
62
+ "acc,exam_id__2018-25": 0.4375,
63
+ "acc,exam_id__2012-09": 0.3246753246753247,
64
+ "acc,exam_id__2012-07": 0.375,
65
+ "acc,exam_id__2016-20a": 0.35,
66
+ "acc,exam_id__2011-05": 0.5375,
67
+ "acc,exam_id__2017-24": 0.45,
68
+ "acc,exam_id__2016-21": 0.4875,
69
+ "acc,exam_id__2013-10": 0.4625,
70
+ "acc,exam_id__2013-11": 0.4625,
71
+ "acc,exam_id__2017-22": 0.475,
72
+ "acc,exam_id__2010-01": 0.35294117647058826,
73
+ "acc,exam_id__2016-20": 0.45,
74
+ "acc,exam_id__2017-23": 0.4125,
75
+ "acc,exam_id__2012-08": 0.475,
76
+ "acc,exam_id__2014-14": 0.5375,
77
+ "acc,exam_id__2010-02": 0.49,
78
+ "acc,exam_id__2015-16": 0.375,
79
+ "acc,exam_id__2015-18": 0.5,
80
+ "acc,exam_id__2015-17": 0.5897435897435898,
81
+ "acc,exam_id__2011-03": 0.32323232323232326,
82
+ "acc,exam_id__2013-12": 0.5,
83
+ "acc,exam_id__2011-04": 0.3375,
84
+ "acc,exam_id__2016-19": 0.5,
85
+ "acc,exam_id__2014-15": 0.44871794871794873,
86
+ "acc,exam_id__2012-06a": 0.4875,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.7276695425104325,
92
+ "acc,all": 0.7649823736780259
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.6851627567981504,
96
+ "acc,all": 0.7189054726368159,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7f6c7c73ab60>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7f6c7c73a520>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f6c7c73a7a0>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7f6c7c73ad40>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f6c7c73afc0>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7f6c7c739ee0>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7f6c7c73a160>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
990
  },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
+ },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
+ },
1028
+ {
1029
+ "function": "take_first"
1030
+ }
1031
+ ]
1032
+ }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
  }
 
1038
  }
1039
  },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
1050
  },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
1061
  },
1062
+ "model_meta": {
1063
+ "truncated": 4,
1064
+ "non_truncated": 14146,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 4,
1068
+ "has_chat_template": true,
1069
+ "chat_type": "system_user_assistant",
1070
+ "n_gpus": 1,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "302e3ae0bcc50dae3fb69fc1b08b518398e8c407",
1073
+ "model_dtype": "torch.bfloat16",
1074
+ "model_memory_footprint": 14483505152,
1075
+ "model_num_parameters": 7241748480,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 32,
1081
+ "max_length": 2560,
1082
+ "max_ctx_length": 2528,
1083
+ "max_gen_toks": 32
1084
  },
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1531.7455065359477,
1094
+ "min_seq_length": 1508,
1095
+ "max_seq_length": 1598,
1096
+ "max_ctx_length": 2528,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
+ },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1740.7455065359477,
1109
+ "min_seq_length": 1717,
1110
+ "max_seq_length": 1807,
1111
+ "max_ctx_length": 2528,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
+ },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 2,
1119
+ "non_truncated": 717,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 2,
1123
+ "mean_seq_length": 1761.9262865090404,
1124
+ "min_seq_length": 1385,
1125
+ "max_seq_length": 2562,
1126
+ "max_ctx_length": 2528,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 2.9972183588317107
1130
+ },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 2,
1134
+ "non_truncated": 1427,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 2,
1138
+ "mean_seq_length": 1662.039188243527,
1139
+ "min_seq_length": 1396,
1140
+ "max_seq_length": 2660,
1141
+ "max_ctx_length": 2528,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 2.998600419874038
1145
+ },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1756.9876923076922,
1154
+ "min_seq_length": 1701,
1155
+ "max_seq_length": 1877,
1156
+ "max_ctx_length": 2528,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1567.3878571428572,
1169
+ "min_seq_length": 1544,
1170
+ "max_seq_length": 1818,
1171
+ "max_ctx_length": 2528,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 0,
1179
+ "non_truncated": 2195,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 0,
1183
+ "mean_seq_length": 1407.764464692483,
1184
+ "min_seq_length": 1141,
1185
+ "max_seq_length": 1910,
1186
+ "max_ctx_length": 2528,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 3.0
1190
+ },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 0,
1194
+ "non_truncated": 851,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 0,
1198
+ "mean_seq_length": 2068.3360752056406,
1199
+ "min_seq_length": 2033,
1200
+ "max_seq_length": 2107,
1201
+ "max_ctx_length": 2528,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 25.0
1205
+ },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1814.2492537313433,
1214
+ "min_seq_length": 1793,
1215
+ "max_seq_length": 1909,
1216
+ "max_ctx_length": 2528,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
  },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=BAAI/Infinity-Instruct-3M-0625-Mistral-7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
  },
1243
+ "git_hash": "5a13f3e"
1244
  }
BAAI/Infinity-Instruct-3M-0625-Mistral-7B/results_2024-07-19T01-41-09.242433.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.6900568132733055,
38
- "all_grouped_npm": 0.5432631027798649,
39
  "all_grouped": {
40
  "enem_challenge": 0.6634009797060881,
41
  "bluex": 0.5507649513212796,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.8110585067106806,
46
  "hatebr_offensive": 0.8052263390689189,
47
  "portuguese_hate_speech": 0.7276695425104325,
48
- "tweetsentbr": 0.5138720675986127
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6634009797060881,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.8110585067106806,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8052263390689189,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7276695425104325,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.5138720675986127
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6634009797060881,
@@ -150,9 +150,9 @@
150
  "main_score": 0.7276695425104325
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.5138720675986127,
154
  "acc,all": 0.7189054726368159,
155
- "main_score": 0.5138720675986127
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.7090891120732541,
38
+ "all_grouped_npm": 0.5715849759940743,
39
  "all_grouped": {
40
  "enem_challenge": 0.6634009797060881,
41
  "bluex": 0.5507649513212796,
 
45
  "faquad_nli": 0.8110585067106806,
46
  "hatebr_offensive": 0.8052263390689189,
47
  "portuguese_hate_speech": 0.7276695425104325,
48
+ "tweetsentbr": 0.6851627567981504
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6634009797060881,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.8110585067106806,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8052263390689189,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7276695425104325,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.6851627567981504
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6634009797060881,
 
150
  "main_score": 0.7276695425104325
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.6851627567981504,
154
  "acc,all": 0.7189054726368159,
155
+ "main_score": 0.6851627567981504
156
  }
157
  },
158
  "config_tasks": {
CohereForAI/c4ai-command-r-v01/raw_2024-04-17T00-36-42.568466/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.883132179380006,
5
- "acc,all": 0.8831699346405228,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.7210331309303998,
10
- "mse,all": 0.6012867647058824,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.6203059805285118,
15
- "acc,exam_id__UNICAMP_2020": 0.6,
16
- "acc,exam_id__USP_2023": 0.75,
17
- "acc,exam_id__UNICAMP_2024": 0.6222222222222222,
18
- "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478,
19
- "acc,exam_id__USP_2020": 0.5357142857142857,
20
- "acc,exam_id__UNICAMP_2018": 0.5,
21
- "acc,exam_id__USP_2022": 0.6326530612244898,
22
- "acc,exam_id__UNICAMP_2019": 0.64,
23
- "acc,exam_id__UNICAMP_2021_2": 0.6078431372549019,
24
- "acc,exam_id__USP_2019": 0.6,
25
- "acc,exam_id__UNICAMP_2022": 0.717948717948718,
26
- "acc,exam_id__USP_2024": 0.7804878048780488,
27
- "acc,exam_id__UNICAMP_2023": 0.7209302325581395,
28
- "acc,exam_id__USP_2018": 0.46296296296296297,
29
- "acc,exam_id__USP_2021": 0.6730769230769231,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.7158852344296711,
35
- "acc,exam_id__2009": 0.6956521739130435,
36
- "acc,exam_id__2013": 0.6851851851851852,
37
- "acc,exam_id__2010": 0.7521367521367521,
38
- "acc,exam_id__2012": 0.75,
39
- "acc,exam_id__2016_2": 0.6747967479674797,
40
- "acc,exam_id__2017": 0.7241379310344828,
41
- "acc,exam_id__2022": 0.6616541353383458,
42
- "acc,exam_id__2023": 0.7481481481481481,
43
- "acc,exam_id__2011": 0.7692307692307693,
44
- "acc,exam_id__2014": 0.7431192660550459,
45
- "acc,exam_id__2015": 0.6890756302521008,
46
- "acc,exam_id__2016": 0.7024793388429752
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.47272296015180265,
50
- "acc,all": 0.47384615384615386,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.8222299935886227,
56
- "acc,all": 0.8257142857142857
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.5521640091116173,
60
- "acc,exam_id__2018-25": 0.475,
61
- "acc,exam_id__2014-14": 0.6375,
62
- "acc,exam_id__2013-11": 0.5375,
63
- "acc,exam_id__2013-12": 0.5875,
64
- "acc,exam_id__2015-16": 0.5125,
65
- "acc,exam_id__2016-19": 0.5769230769230769,
66
- "acc,exam_id__2015-17": 0.6923076923076923,
67
- "acc,exam_id__2016-21": 0.4625,
68
- "acc,exam_id__2011-03": 0.5050505050505051,
69
- "acc,exam_id__2012-08": 0.5625,
70
- "acc,exam_id__2012-07": 0.5375,
71
- "acc,exam_id__2011-04": 0.4625,
72
- "acc,exam_id__2013-10": 0.5875,
73
- "acc,exam_id__2012-06a": 0.6375,
74
- "acc,exam_id__2010-01": 0.4117647058823529,
75
- "acc,exam_id__2017-23": 0.5625,
76
- "acc,exam_id__2015-18": 0.5125,
77
- "acc,exam_id__2012-06": 0.5375,
78
- "acc,exam_id__2017-22": 0.6375,
79
- "acc,exam_id__2014-15": 0.6538461538461539,
80
- "acc,exam_id__2010-02": 0.62,
81
- "acc,exam_id__2014-13": 0.5375,
82
- "acc,exam_id__2016-20": 0.575,
83
- "acc,exam_id__2011-05": 0.5875,
84
- "acc,exam_id__2016-20a": 0.5625,
85
- "acc,exam_id__2012-09": 0.45454545454545453,
86
- "acc,exam_id__2017-24": 0.4875,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.7102306144559665,
92
- "acc,all": 0.7285546415981199
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.48595613300125107,
96
- "acc,all": 0.7114427860696517,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
208
  },
209
- {
210
- "function": "take_first"
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7fa6afa8cfe0>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7fa6afa8c9a0>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
 
 
414
  },
415
- {
416
- "function": "take_first"
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fa6afa8cc20>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7fa6afa8d1c0>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
 
 
503
  },
504
- {
505
- "function": "take_first"
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fa6afa8d440>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
624
  },
625
- {
626
- "function": "take_first"
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7fa6afa8c360>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
 
 
 
839
  },
840
- {
841
- "function": "take_first"
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7fa6afa8c5e0>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
963
  },
964
- {
965
- "function": "take_first"
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
  },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
- },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
  },
1028
- {
1029
- "function": "take_first"
1030
  }
1031
- ]
1032
  }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
- }
1038
- }
1039
- },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
1050
- },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
1061
- },
1062
- "model_meta": {
1063
- "truncated": 0,
1064
- "non_truncated": 14150,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 0,
1068
- "has_chat_template": true,
1069
- "chat_type": "system_user_assistant",
1070
- "n_gpus": 1,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "16881ccde1c68bbc7041280e6a66637bc46bfe88",
1073
- "model_dtype": "torch.float16",
1074
- "model_memory_footprint": 69961672704,
1075
- "model_num_parameters": 34980831232,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 1,
1081
- "max_length": 2560,
1082
- "max_ctx_length": 2528,
1083
- "max_gen_toks": 32
1084
- },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1038.3545751633987,
1094
- "min_seq_length": 1022,
1095
- "max_seq_length": 1082,
1096
- "max_ctx_length": 2528,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1283.3545751633987,
1109
- "min_seq_length": 1267,
1110
- "max_seq_length": 1327,
1111
- "max_ctx_length": 2528,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
  },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 0,
1119
- "non_truncated": 719,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 0,
1123
- "mean_seq_length": 1355.8970792767732,
1124
- "min_seq_length": 1076,
1125
- "max_seq_length": 1949,
1126
- "max_ctx_length": 2528,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 3.0
1130
  },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 0,
1134
- "non_truncated": 1429,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 0,
1138
- "mean_seq_length": 1217.5381385584324,
1139
- "min_seq_length": 1027,
1140
- "max_seq_length": 2206,
1141
- "max_ctx_length": 2528,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 3.0
1145
  },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1138.0676923076924,
1154
- "min_seq_length": 1102,
1155
- "max_seq_length": 1205,
1156
- "max_ctx_length": 2528,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
1160
  },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1095.9871428571428,
1169
- "min_seq_length": 1079,
1170
- "max_seq_length": 1283,
1171
- "max_ctx_length": 2528,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 0,
1179
- "non_truncated": 2195,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 0,
1183
- "mean_seq_length": 970.0168564920274,
1184
- "min_seq_length": 784,
1185
- "max_seq_length": 1291,
1186
- "max_ctx_length": 2528,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 3.0
1190
  },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 0,
1194
- "non_truncated": 851,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 0,
1198
- "mean_seq_length": 1454.6169212690952,
1199
- "min_seq_length": 1427,
1200
- "max_seq_length": 1491,
1201
- "max_ctx_length": 2528,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 25.0
1205
  },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1420.0830845771145,
1214
- "min_seq_length": 1404,
1215
- "max_seq_length": 1470,
1216
- "max_ctx_length": 2528,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
- },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=CohereForAI/c4ai-command-r-v01,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
- },
1243
- "git_hash": "0e4d6ae"
1244
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.883132179380006,
5
+ "acc,all": 0.8831699346405228,
6
+ "alias": "assin2_rte"
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.7210331309303998,
10
+ "mse,all": 0.6012867647058824,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.6203059805285118,
15
+ "acc,exam_id__UNICAMP_2020": 0.6,
16
+ "acc,exam_id__USP_2023": 0.75,
17
+ "acc,exam_id__UNICAMP_2024": 0.6222222222222222,
18
+ "acc,exam_id__UNICAMP_2021_1": 0.5652173913043478,
19
+ "acc,exam_id__USP_2020": 0.5357142857142857,
20
+ "acc,exam_id__UNICAMP_2018": 0.5,
21
+ "acc,exam_id__USP_2022": 0.6326530612244898,
22
+ "acc,exam_id__UNICAMP_2019": 0.64,
23
+ "acc,exam_id__UNICAMP_2021_2": 0.6078431372549019,
24
+ "acc,exam_id__USP_2019": 0.6,
25
+ "acc,exam_id__UNICAMP_2022": 0.717948717948718,
26
+ "acc,exam_id__USP_2024": 0.7804878048780488,
27
+ "acc,exam_id__UNICAMP_2023": 0.7209302325581395,
28
+ "acc,exam_id__USP_2018": 0.46296296296296297,
29
+ "acc,exam_id__USP_2021": 0.6730769230769231,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.7158852344296711,
35
+ "acc,exam_id__2009": 0.6956521739130435,
36
+ "acc,exam_id__2013": 0.6851851851851852,
37
+ "acc,exam_id__2010": 0.7521367521367521,
38
+ "acc,exam_id__2012": 0.75,
39
+ "acc,exam_id__2016_2": 0.6747967479674797,
40
+ "acc,exam_id__2017": 0.7241379310344828,
41
+ "acc,exam_id__2022": 0.6616541353383458,
42
+ "acc,exam_id__2023": 0.7481481481481481,
43
+ "acc,exam_id__2011": 0.7692307692307693,
44
+ "acc,exam_id__2014": 0.7431192660550459,
45
+ "acc,exam_id__2015": 0.6890756302521008,
46
+ "acc,exam_id__2016": 0.7024793388429752
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.47272296015180265,
50
+ "acc,all": 0.47384615384615386,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.8222299935886227,
56
+ "acc,all": 0.8257142857142857
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.5521640091116173,
60
+ "acc,exam_id__2018-25": 0.475,
61
+ "acc,exam_id__2014-14": 0.6375,
62
+ "acc,exam_id__2013-11": 0.5375,
63
+ "acc,exam_id__2013-12": 0.5875,
64
+ "acc,exam_id__2015-16": 0.5125,
65
+ "acc,exam_id__2016-19": 0.5769230769230769,
66
+ "acc,exam_id__2015-17": 0.6923076923076923,
67
+ "acc,exam_id__2016-21": 0.4625,
68
+ "acc,exam_id__2011-03": 0.5050505050505051,
69
+ "acc,exam_id__2012-08": 0.5625,
70
+ "acc,exam_id__2012-07": 0.5375,
71
+ "acc,exam_id__2011-04": 0.4625,
72
+ "acc,exam_id__2013-10": 0.5875,
73
+ "acc,exam_id__2012-06a": 0.6375,
74
+ "acc,exam_id__2010-01": 0.4117647058823529,
75
+ "acc,exam_id__2017-23": 0.5625,
76
+ "acc,exam_id__2015-18": 0.5125,
77
+ "acc,exam_id__2012-06": 0.5375,
78
+ "acc,exam_id__2017-22": 0.6375,
79
+ "acc,exam_id__2014-15": 0.6538461538461539,
80
+ "acc,exam_id__2010-02": 0.62,
81
+ "acc,exam_id__2014-13": 0.5375,
82
+ "acc,exam_id__2016-20": 0.575,
83
+ "acc,exam_id__2011-05": 0.5875,
84
+ "acc,exam_id__2016-20a": 0.5625,
85
+ "acc,exam_id__2012-09": 0.45454545454545453,
86
+ "acc,exam_id__2017-24": 0.4875,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.7102306144559665,
92
+ "acc,all": 0.7285546415981199
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.6479415106683347,
96
+ "acc,all": 0.7114427860696517,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7fa6afa8cfe0>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
 
 
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7fa6afa8c9a0>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fa6afa8cc20>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7fa6afa8d1c0>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
 
 
 
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fa6afa8d440>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
 
 
 
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
 
 
 
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
 
 
 
 
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7fa6afa8c360>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7fa6afa8c5e0>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
 
 
 
 
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
 
 
 
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
 
 
 
990
  },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
+ },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
+ },
1028
+ {
1029
+ "function": "take_first"
1030
+ }
1031
+ ]
1032
+ }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
  }
 
1038
  }
 
 
1039
  },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
1050
  },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
1061
  },
1062
+ "model_meta": {
1063
+ "truncated": 0,
1064
+ "non_truncated": 14150,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 0,
1068
+ "has_chat_template": true,
1069
+ "chat_type": "system_user_assistant",
1070
+ "n_gpus": 1,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "16881ccde1c68bbc7041280e6a66637bc46bfe88",
1073
+ "model_dtype": "torch.float16",
1074
+ "model_memory_footprint": 69961672704,
1075
+ "model_num_parameters": 34980831232,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 1,
1081
+ "max_length": 2560,
1082
+ "max_ctx_length": 2528,
1083
+ "max_gen_toks": 32
1084
  },
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1038.3545751633987,
1094
+ "min_seq_length": 1022,
1095
+ "max_seq_length": 1082,
1096
+ "max_ctx_length": 2528,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
+ },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1283.3545751633987,
1109
+ "min_seq_length": 1267,
1110
+ "max_seq_length": 1327,
1111
+ "max_ctx_length": 2528,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
+ },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 0,
1119
+ "non_truncated": 719,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 0,
1123
+ "mean_seq_length": 1355.8970792767732,
1124
+ "min_seq_length": 1076,
1125
+ "max_seq_length": 1949,
1126
+ "max_ctx_length": 2528,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 3.0
1130
+ },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 0,
1134
+ "non_truncated": 1429,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 0,
1138
+ "mean_seq_length": 1217.5381385584324,
1139
+ "min_seq_length": 1027,
1140
+ "max_seq_length": 2206,
1141
+ "max_ctx_length": 2528,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 3.0
1145
+ },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1138.0676923076924,
1154
+ "min_seq_length": 1102,
1155
+ "max_seq_length": 1205,
1156
+ "max_ctx_length": 2528,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1095.9871428571428,
1169
+ "min_seq_length": 1079,
1170
+ "max_seq_length": 1283,
1171
+ "max_ctx_length": 2528,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 0,
1179
+ "non_truncated": 2195,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 0,
1183
+ "mean_seq_length": 970.0168564920274,
1184
+ "min_seq_length": 784,
1185
+ "max_seq_length": 1291,
1186
+ "max_ctx_length": 2528,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 3.0
1190
+ },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 0,
1194
+ "non_truncated": 851,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 0,
1198
+ "mean_seq_length": 1454.6169212690952,
1199
+ "min_seq_length": 1427,
1200
+ "max_seq_length": 1491,
1201
+ "max_ctx_length": 2528,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 25.0
1205
+ },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1420.0830845771145,
1214
+ "min_seq_length": 1404,
1215
+ "max_seq_length": 1470,
1216
+ "max_ctx_length": 2528,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
  },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=CohereForAI/c4ai-command-r-v01,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
  },
1243
+ "git_hash": "0e4d6ae"
1244
  }
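
Note on the metric shown above: the "f1_macro" values in these results are macro-averaged F1 scores, i.e. F1 is computed for each class separately and the per-class scores are averaged with equal weight. That is why the score can sit well below plain accuracy on an imbalanced task such as tweetsentbr (acc,all 0.7114 vs. f1_macro,all 0.6479 in the corrected output above). Below is a minimal sketch of that aggregation, assuming scikit-learn is available; the gold/predicted label lists are invented for illustration and this is not the evaluation harness's own implementation.

from sklearn.metrics import f1_score

# Hypothetical gold and predicted labels for a 3-class sentiment task
# (Positivo / Neutro / Negativo); values are illustrative only.
gold = ["Positivo", "Negativo", "Neutro", "Positivo", "Neutro", "Negativo"]
pred = ["Positivo", "Negativo", "Positivo", "Positivo", "Neutro", "Neutro"]

# Macro F1: per-class F1 scores averaged with equal weight,
# independent of how many examples each class has.
macro_f1 = f1_score(gold, pred, average="macro")

# Accuracy, for comparison: fraction of exact matches.
accuracy = sum(g == p for g, p in zip(gold, pred)) / len(gold)

print(f"f1_macro = {macro_f1:.4f}, acc = {accuracy:.4f}")
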
CohereForAI/c4ai-command-r-v01/results_2024-04-17T00-36-42.568466.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.6648511372864276,
38
- "all_grouped_npm": 0.4887985400400404,
39
  "all_grouped": {
40
  "enem_challenge": 0.7158852344296711,
41
  "bluex": 0.6203059805285118,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.47272296015180265,
46
  "hatebr_offensive": 0.8222299935886227,
47
  "portuguese_hate_speech": 0.7102306144559665,
48
- "tweetsentbr": 0.48595613300125107
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.7158852344296711,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.47272296015180265,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8222299935886227,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7102306144559665,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.48595613300125107
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.7158852344296711,
@@ -150,9 +150,9 @@
150
  "main_score": 0.7102306144559665
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.48595613300125107,
154
  "acc,all": 0.7114427860696517,
155
- "main_score": 0.48595613300125107
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.6828495125827703,
38
+ "all_grouped_npm": 0.5155818366119789,
39
  "all_grouped": {
40
  "enem_challenge": 0.7158852344296711,
41
  "bluex": 0.6203059805285118,
 
45
  "faquad_nli": 0.47272296015180265,
46
  "hatebr_offensive": 0.8222299935886227,
47
  "portuguese_hate_speech": 0.7102306144559665,
48
+ "tweetsentbr": 0.6479415106683347
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.7158852344296711,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.47272296015180265,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8222299935886227,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7102306144559665,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.6479415106683347
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.7158852344296711,
 
150
  "main_score": 0.7102306144559665
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.6479415106683347,
154
  "acc,all": 0.7114427860696517,
155
+ "main_score": 0.6479415106683347
156
  }
157
  },
158
  "config_tasks": {
Columbia-NLP/LION-LLaMA-3-8b-odpo-v1.0/raw_2024-07-15T01-32-05.828202/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.9193834267092047,
5
- "acc,all": 0.9195261437908496,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.7172104868084787,
10
- "mse,all": 0.6943019494681872,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.48817802503477054,
15
- "acc,exam_id__USP_2022": 0.4897959183673469,
16
- "acc,exam_id__USP_2019": 0.475,
17
- "acc,exam_id__UNICAMP_2021_2": 0.39215686274509803,
18
- "acc,exam_id__UNICAMP_2022": 0.46153846153846156,
19
- "acc,exam_id__UNICAMP_2018": 0.37037037037037035,
20
- "acc,exam_id__UNICAMP_2021_1": 0.43478260869565216,
21
- "acc,exam_id__UNICAMP_2020": 0.509090909090909,
22
- "acc,exam_id__USP_2020": 0.5357142857142857,
23
- "acc,exam_id__UNICAMP_2024": 0.5333333333333333,
24
- "acc,exam_id__UNICAMP_2019": 0.48,
25
- "acc,exam_id__USP_2021": 0.5384615384615384,
26
- "acc,exam_id__UNICAMP_2023": 0.4418604651162791,
27
- "acc,exam_id__USP_2018": 0.48148148148148145,
28
- "acc,exam_id__USP_2024": 0.6097560975609756,
29
- "acc,exam_id__USP_2023": 0.5909090909090909,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.6368089573128062,
35
- "acc,exam_id__2011": 0.7094017094017094,
36
- "acc,exam_id__2015": 0.680672268907563,
37
- "acc,exam_id__2017": 0.5948275862068966,
38
- "acc,exam_id__2016": 0.6198347107438017,
39
- "acc,exam_id__2016_2": 0.5528455284552846,
40
- "acc,exam_id__2014": 0.6697247706422018,
41
- "acc,exam_id__2023": 0.6148148148148148,
42
- "acc,exam_id__2013": 0.6203703703703703,
43
- "acc,exam_id__2010": 0.6410256410256411,
44
- "acc,exam_id__2009": 0.6782608695652174,
45
- "acc,exam_id__2022": 0.6390977443609023,
46
- "acc,exam_id__2012": 0.6293103448275862
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.48579447418078225,
50
- "acc,all": 0.7630769230769231,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.5709864973474453,
56
- "acc,all": 0.8535714285714285
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.43143507972665146,
60
- "acc,exam_id__2012-06": 0.4125,
61
- "acc,exam_id__2016-21": 0.325,
62
- "acc,exam_id__2013-11": 0.375,
63
- "acc,exam_id__2012-07": 0.45,
64
- "acc,exam_id__2010-02": 0.46,
65
- "acc,exam_id__2012-09": 0.4805194805194805,
66
- "acc,exam_id__2016-19": 0.4230769230769231,
67
- "acc,exam_id__2011-05": 0.4625,
68
- "acc,exam_id__2014-13": 0.3875,
69
- "acc,exam_id__2011-03": 0.3838383838383838,
70
- "acc,exam_id__2013-10": 0.4125,
71
- "acc,exam_id__2017-23": 0.4625,
72
- "acc,exam_id__2015-16": 0.3125,
73
- "acc,exam_id__2017-22": 0.5125,
74
- "acc,exam_id__2010-01": 0.35294117647058826,
75
- "acc,exam_id__2014-15": 0.6025641025641025,
76
- "acc,exam_id__2016-20a": 0.4125,
77
- "acc,exam_id__2012-08": 0.4125,
78
- "acc,exam_id__2018-25": 0.475,
79
- "acc,exam_id__2017-24": 0.3625,
80
- "acc,exam_id__2014-14": 0.5375,
81
- "acc,exam_id__2015-17": 0.5512820512820513,
82
- "acc,exam_id__2012-06a": 0.425,
83
- "acc,exam_id__2016-20": 0.4375,
84
- "acc,exam_id__2013-12": 0.5,
85
- "acc,exam_id__2015-18": 0.4375,
86
- "acc,exam_id__2011-04": 0.3,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.475879953813593,
92
- "acc,all": 0.7379553466509988
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.5334393438436622,
96
- "acc,all": 0.7338308457711443,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
 
 
 
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
 
 
 
208
  },
209
- {
210
- "function": "take_first"
 
 
 
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7f783ec42b60>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
 
 
 
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7f783ec42520>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
 
 
 
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
 
 
414
  },
415
- {
416
- "function": "take_first"
 
 
 
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f783ec427a0>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7f783ec42d40>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
 
 
503
  },
504
- {
505
- "function": "take_first"
 
 
 
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f783ec42fc0>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
 
 
 
624
  },
625
- {
626
- "function": "take_first"
 
 
 
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
 
 
 
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7f783ec41ee0>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
 
 
 
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
 
 
 
839
  },
840
- {
841
- "function": "take_first"
 
 
 
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7f783ec42160>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
963
  },
964
- {
965
- "function": "take_first"
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
  },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
- },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
  },
1028
- {
1029
- "function": "take_first"
1030
  }
1031
- ]
1032
  }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
- }
1038
- }
1039
- },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
1050
- },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
1061
- },
1062
- "model_meta": {
1063
- "truncated": 0,
1064
- "non_truncated": 14150,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 0,
1068
- "has_chat_template": true,
1069
- "chat_type": "system_user_assistant",
1070
- "n_gpus": 1,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "d947400efa0d824ac158c5e41bbe1dbed398d257",
1073
- "model_dtype": "torch.bfloat16",
1074
- "model_memory_footprint": 16060530688,
1075
- "model_num_parameters": 8030261248,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 8,
1081
- "max_length": 2560,
1082
- "max_ctx_length": 2528,
1083
- "max_gen_toks": 32
1084
- },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1317.5322712418301,
1094
- "min_seq_length": 1298,
1095
- "max_seq_length": 1381,
1096
- "max_ctx_length": 2528,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1508.5322712418301,
1109
- "min_seq_length": 1489,
1110
- "max_seq_length": 1572,
1111
- "max_ctx_length": 2528,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
  },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 0,
1119
- "non_truncated": 719,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 0,
1123
- "mean_seq_length": 1483.7719054242002,
1124
- "min_seq_length": 1164,
1125
- "max_seq_length": 2133,
1126
- "max_ctx_length": 2528,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 3.0
1130
  },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 0,
1134
- "non_truncated": 1429,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 0,
1138
- "mean_seq_length": 1411.3547935619315,
1139
- "min_seq_length": 1186,
1140
- "max_seq_length": 2339,
1141
- "max_ctx_length": 2528,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 3.0
1145
  },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1446.8215384615385,
1154
- "min_seq_length": 1401,
1155
- "max_seq_length": 1543,
1156
- "max_ctx_length": 2528,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
1160
  },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1278.3878571428572,
1169
- "min_seq_length": 1258,
1170
- "max_seq_length": 1497,
1171
- "max_ctx_length": 2528,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 0,
1179
- "non_truncated": 2195,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 0,
1183
- "mean_seq_length": 1219.3772209567198,
1184
- "min_seq_length": 987,
1185
- "max_seq_length": 1653,
1186
- "max_ctx_length": 2528,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 3.0
1190
  },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 0,
1194
- "non_truncated": 851,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 0,
1198
- "mean_seq_length": 1675.4195064629848,
1199
- "min_seq_length": 1645,
1200
- "max_seq_length": 1707,
1201
- "max_ctx_length": 2528,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 25.0
1205
  },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1536.1537313432837,
1214
- "min_seq_length": 1519,
1215
- "max_seq_length": 1584,
1216
- "max_ctx_length": 2528,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
- },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=Columbia-NLP/LION-LLaMA-3-8b-odpo-v1.0,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
- },
1243
- "git_hash": "5a13f3e"
1244
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.9193834267092047,
5
+ "acc,all": 0.9195261437908496,
6
+ "alias": "assin2_rte"
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.7172104868084787,
10
+ "mse,all": 0.6943019494681872,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.48817802503477054,
15
+ "acc,exam_id__USP_2022": 0.4897959183673469,
16
+ "acc,exam_id__USP_2019": 0.475,
17
+ "acc,exam_id__UNICAMP_2021_2": 0.39215686274509803,
18
+ "acc,exam_id__UNICAMP_2022": 0.46153846153846156,
19
+ "acc,exam_id__UNICAMP_2018": 0.37037037037037035,
20
+ "acc,exam_id__UNICAMP_2021_1": 0.43478260869565216,
21
+ "acc,exam_id__UNICAMP_2020": 0.509090909090909,
22
+ "acc,exam_id__USP_2020": 0.5357142857142857,
23
+ "acc,exam_id__UNICAMP_2024": 0.5333333333333333,
24
+ "acc,exam_id__UNICAMP_2019": 0.48,
25
+ "acc,exam_id__USP_2021": 0.5384615384615384,
26
+ "acc,exam_id__UNICAMP_2023": 0.4418604651162791,
27
+ "acc,exam_id__USP_2018": 0.48148148148148145,
28
+ "acc,exam_id__USP_2024": 0.6097560975609756,
29
+ "acc,exam_id__USP_2023": 0.5909090909090909,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.6368089573128062,
35
+ "acc,exam_id__2011": 0.7094017094017094,
36
+ "acc,exam_id__2015": 0.680672268907563,
37
+ "acc,exam_id__2017": 0.5948275862068966,
38
+ "acc,exam_id__2016": 0.6198347107438017,
39
+ "acc,exam_id__2016_2": 0.5528455284552846,
40
+ "acc,exam_id__2014": 0.6697247706422018,
41
+ "acc,exam_id__2023": 0.6148148148148148,
42
+ "acc,exam_id__2013": 0.6203703703703703,
43
+ "acc,exam_id__2010": 0.6410256410256411,
44
+ "acc,exam_id__2009": 0.6782608695652174,
45
+ "acc,exam_id__2022": 0.6390977443609023,
46
+ "acc,exam_id__2012": 0.6293103448275862
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.7286917112711735,
50
+ "acc,all": 0.7630769230769231,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.8564797460211679,
56
+ "acc,all": 0.8535714285714285
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.43143507972665146,
60
+ "acc,exam_id__2012-06": 0.4125,
61
+ "acc,exam_id__2016-21": 0.325,
62
+ "acc,exam_id__2013-11": 0.375,
63
+ "acc,exam_id__2012-07": 0.45,
64
+ "acc,exam_id__2010-02": 0.46,
65
+ "acc,exam_id__2012-09": 0.4805194805194805,
66
+ "acc,exam_id__2016-19": 0.4230769230769231,
67
+ "acc,exam_id__2011-05": 0.4625,
68
+ "acc,exam_id__2014-13": 0.3875,
69
+ "acc,exam_id__2011-03": 0.3838383838383838,
70
+ "acc,exam_id__2013-10": 0.4125,
71
+ "acc,exam_id__2017-23": 0.4625,
72
+ "acc,exam_id__2015-16": 0.3125,
73
+ "acc,exam_id__2017-22": 0.5125,
74
+ "acc,exam_id__2010-01": 0.35294117647058826,
75
+ "acc,exam_id__2014-15": 0.6025641025641025,
76
+ "acc,exam_id__2016-20a": 0.4125,
77
+ "acc,exam_id__2012-08": 0.4125,
78
+ "acc,exam_id__2018-25": 0.475,
79
+ "acc,exam_id__2017-24": 0.3625,
80
+ "acc,exam_id__2014-14": 0.5375,
81
+ "acc,exam_id__2015-17": 0.5512820512820513,
82
+ "acc,exam_id__2012-06a": 0.425,
83
+ "acc,exam_id__2016-20": 0.4375,
84
+ "acc,exam_id__2013-12": 0.5,
85
+ "acc,exam_id__2015-18": 0.4375,
86
+ "acc,exam_id__2011-04": 0.3,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.7138199307203894,
92
+ "acc,all": 0.7379553466509988
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.7112524584582163,
96
+ "acc,all": 0.7338308457711443,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7f783ec42b60>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7f783ec42520>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f783ec427a0>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7f783ec42d40>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f783ec42fc0>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7f783ec41ee0>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7f783ec42160>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
990
  },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
+ },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
+ },
1028
+ {
1029
+ "function": "take_first"
1030
+ }
1031
+ ]
1032
+ }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
  }
 
1038
  }
1039
  },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
1050
  },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
1061
  },
1062
+ "model_meta": {
1063
+ "truncated": 0,
1064
+ "non_truncated": 14150,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 0,
1068
+ "has_chat_template": true,
1069
+ "chat_type": "system_user_assistant",
1070
+ "n_gpus": 1,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "d947400efa0d824ac158c5e41bbe1dbed398d257",
1073
+ "model_dtype": "torch.bfloat16",
1074
+ "model_memory_footprint": 16060530688,
1075
+ "model_num_parameters": 8030261248,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 8,
1081
+ "max_length": 2560,
1082
+ "max_ctx_length": 2528,
1083
+ "max_gen_toks": 32
1084
  },
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1317.5322712418301,
1094
+ "min_seq_length": 1298,
1095
+ "max_seq_length": 1381,
1096
+ "max_ctx_length": 2528,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
+ },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1508.5322712418301,
1109
+ "min_seq_length": 1489,
1110
+ "max_seq_length": 1572,
1111
+ "max_ctx_length": 2528,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
+ },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 0,
1119
+ "non_truncated": 719,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 0,
1123
+ "mean_seq_length": 1483.7719054242002,
1124
+ "min_seq_length": 1164,
1125
+ "max_seq_length": 2133,
1126
+ "max_ctx_length": 2528,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 3.0
1130
+ },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 0,
1134
+ "non_truncated": 1429,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 0,
1138
+ "mean_seq_length": 1411.3547935619315,
1139
+ "min_seq_length": 1186,
1140
+ "max_seq_length": 2339,
1141
+ "max_ctx_length": 2528,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 3.0
1145
+ },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1446.8215384615385,
1154
+ "min_seq_length": 1401,
1155
+ "max_seq_length": 1543,
1156
+ "max_ctx_length": 2528,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1278.3878571428572,
1169
+ "min_seq_length": 1258,
1170
+ "max_seq_length": 1497,
1171
+ "max_ctx_length": 2528,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 0,
1179
+ "non_truncated": 2195,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 0,
1183
+ "mean_seq_length": 1219.3772209567198,
1184
+ "min_seq_length": 987,
1185
+ "max_seq_length": 1653,
1186
+ "max_ctx_length": 2528,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 3.0
1190
+ },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 0,
1194
+ "non_truncated": 851,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 0,
1198
+ "mean_seq_length": 1675.4195064629848,
1199
+ "min_seq_length": 1645,
1200
+ "max_seq_length": 1707,
1201
+ "max_ctx_length": 2528,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 25.0
1205
+ },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1536.1537313432837,
1214
+ "min_seq_length": 1519,
1215
+ "max_seq_length": 1584,
1216
+ "max_ctx_length": 2528,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
  },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=Columbia-NLP/LION-LLaMA-3-8b-odpo-v1.0,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
  },
1243
+ "git_hash": "5a13f3e"
1244
  }
Columbia-NLP/LION-LLaMA-3-8b-odpo-v1.0/results_2024-07-15T01-32-05.828202.json CHANGED
@@ -34,18 +34,18 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.5843462494197105,
38
- "all_grouped_npm": 0.3533281678439427,
39
  "all_grouped": {
40
  "enem_challenge": 0.6368089573128062,
41
  "bluex": 0.48817802503477054,
42
  "oab_exams": 0.43143507972665146,
43
  "assin2_rte": 0.9193834267092047,
44
  "assin2_sts": 0.7172104868084787,
45
- "faquad_nli": 0.48579447418078225,
46
- "hatebr_offensive": 0.5709864973474453,
47
- "portuguese_hate_speech": 0.475879953813593,
48
- "tweetsentbr": 0.5334393438436622
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6368089573128062,
@@ -53,10 +53,10 @@
53
  "harness|oab_exams|oab_exams|None|3": 0.43143507972665146,
54
  "harness|assin2_rte|assin2_rte|None|15": 0.9193834267092047,
55
  "harness|assin2_sts|assin2_sts|None|15": 0.7172104868084787,
56
- "harness|faquad_nli|faquad_nli|None|15": 0.48579447418078225,
57
- "harness|hatebr_offensive|hatebr_offensive|None|25": 0.5709864973474453,
58
- "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.475879953813593,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.5334393438436622
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6368089573128062,
@@ -135,24 +135,24 @@
135
  "main_score": 0.7172104868084787
136
  },
137
  "harness|faquad_nli|faquad_nli|None|15": {
138
- "f1_macro,all": 0.48579447418078225,
139
  "acc,all": 0.7630769230769231,
140
- "main_score": 0.48579447418078225
141
  },
142
  "harness|hatebr_offensive|hatebr_offensive|None|25": {
143
- "f1_macro,all": 0.5709864973474453,
144
  "acc,all": 0.8535714285714285,
145
- "main_score": 0.5709864973474453
146
  },
147
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": {
148
- "f1_macro,all": 0.475879953813593,
149
  "acc,all": 0.7379553466509988,
150
- "main_score": 0.475879953813593
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.5334393438436622,
154
  "acc,all": 0.7338308457711443,
155
- "main_score": 0.5334393438436622
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.6892510913403176,
38
+ "all_grouped_npm": 0.546527082582236,
39
  "all_grouped": {
40
  "enem_challenge": 0.6368089573128062,
41
  "bluex": 0.48817802503477054,
42
  "oab_exams": 0.43143507972665146,
43
  "assin2_rte": 0.9193834267092047,
44
  "assin2_sts": 0.7172104868084787,
45
+ "faquad_nli": 0.7286917112711735,
46
+ "hatebr_offensive": 0.8564797460211679,
47
+ "portuguese_hate_speech": 0.7138199307203894,
48
+ "tweetsentbr": 0.7112524584582163
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6368089573128062,
 
53
  "harness|oab_exams|oab_exams|None|3": 0.43143507972665146,
54
  "harness|assin2_rte|assin2_rte|None|15": 0.9193834267092047,
55
  "harness|assin2_sts|assin2_sts|None|15": 0.7172104868084787,
56
+ "harness|faquad_nli|faquad_nli|None|15": 0.7286917112711735,
57
+ "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8564797460211679,
58
+ "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7138199307203894,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.7112524584582163
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6368089573128062,
 
135
  "main_score": 0.7172104868084787
136
  },
137
  "harness|faquad_nli|faquad_nli|None|15": {
138
+ "f1_macro,all": 0.7286917112711735,
139
  "acc,all": 0.7630769230769231,
140
+ "main_score": 0.7286917112711735
141
  },
142
  "harness|hatebr_offensive|hatebr_offensive|None|25": {
143
+ "f1_macro,all": 0.8564797460211679,
144
  "acc,all": 0.8535714285714285,
145
+ "main_score": 0.8564797460211679
146
  },
147
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": {
148
+ "f1_macro,all": 0.7138199307203894,
149
  "acc,all": 0.7379553466509988,
150
+ "main_score": 0.7138199307203894
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.7112524584582163,
154
  "acc,all": 0.7338308457711443,
155
+ "main_score": 0.7112524584582163
156
  }
157
  },
158
  "config_tasks": {
CultriX/NeuralMona_MoE-4x7B/raw_2024-05-26T13-29-26.736769/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.9244279910791389,
5
- "acc,all": 0.9244281045751634,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.7720274719342004,
10
- "mse,all": 0.4360906862745098,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.5340751043115438,
15
- "acc,exam_id__UNICAMP_2018": 0.48148148148148145,
16
- "acc,exam_id__UNICAMP_2023": 0.6046511627906976,
17
- "acc,exam_id__USP_2023": 0.6136363636363636,
18
- "acc,exam_id__UNICAMP_2024": 0.4888888888888889,
19
- "acc,exam_id__USP_2024": 0.7317073170731707,
20
- "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652,
21
- "acc,exam_id__USP_2020": 0.5178571428571429,
22
- "acc,exam_id__UNICAMP_2020": 0.5636363636363636,
23
- "acc,exam_id__UNICAMP_2022": 0.5897435897435898,
24
- "acc,exam_id__UNICAMP_2019": 0.54,
25
- "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824,
26
- "acc,exam_id__USP_2018": 0.5,
27
- "acc,exam_id__USP_2021": 0.46153846153846156,
28
- "acc,exam_id__USP_2019": 0.425,
29
- "acc,exam_id__USP_2022": 0.46938775510204084,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.6312106368089573,
35
- "acc,exam_id__2011": 0.6410256410256411,
36
- "acc,exam_id__2017": 0.6379310344827587,
37
- "acc,exam_id__2015": 0.6218487394957983,
38
- "acc,exam_id__2016": 0.5537190082644629,
39
- "acc,exam_id__2016_2": 0.6341463414634146,
40
- "acc,exam_id__2009": 0.6173913043478261,
41
- "acc,exam_id__2012": 0.6379310344827587,
42
- "acc,exam_id__2010": 0.7008547008547008,
43
- "acc,exam_id__2013": 0.6851851851851852,
44
- "acc,exam_id__2014": 0.6055045871559633,
45
- "acc,exam_id__2022": 0.6090225563909775,
46
- "acc,exam_id__2023": 0.6370370370370371
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.7694314032342202,
50
- "acc,all": 0.816923076923077,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.8409826856991804,
56
- "acc,all": 0.8428571428571429
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.4214123006833713,
60
- "acc,exam_id__2011-03": 0.3333333333333333,
61
- "acc,exam_id__2014-13": 0.325,
62
- "acc,exam_id__2013-10": 0.4125,
63
- "acc,exam_id__2017-24": 0.4,
64
- "acc,exam_id__2017-22": 0.5375,
65
- "acc,exam_id__2012-06a": 0.375,
66
- "acc,exam_id__2016-20a": 0.3375,
67
- "acc,exam_id__2012-09": 0.38961038961038963,
68
- "acc,exam_id__2015-16": 0.4125,
69
- "acc,exam_id__2011-04": 0.4125,
70
- "acc,exam_id__2012-07": 0.3625,
71
- "acc,exam_id__2014-14": 0.5375,
72
- "acc,exam_id__2014-15": 0.46153846153846156,
73
- "acc,exam_id__2010-02": 0.44,
74
- "acc,exam_id__2015-18": 0.4125,
75
- "acc,exam_id__2016-19": 0.5128205128205128,
76
- "acc,exam_id__2012-06": 0.4625,
77
- "acc,exam_id__2013-12": 0.425,
78
- "acc,exam_id__2011-05": 0.4875,
79
- "acc,exam_id__2017-23": 0.3875,
80
- "acc,exam_id__2013-11": 0.475,
81
- "acc,exam_id__2016-20": 0.4,
82
- "acc,exam_id__2016-21": 0.3625,
83
- "acc,exam_id__2018-25": 0.4375,
84
- "acc,exam_id__2010-01": 0.3764705882352941,
85
- "acc,exam_id__2012-08": 0.4125,
86
- "acc,exam_id__2015-17": 0.5128205128205128,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.6819724557061289,
92
- "acc,all": 0.7132784958871915
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.4835212271941399,
96
- "acc,all": 0.6955223880597015,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
208
  },
209
- {
210
- "function": "take_first"
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7f80d069a200>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7f80d0699bc0>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
414
  },
415
- {
416
- "function": "take_first"
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f80d0699e40>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7f80d069a3e0>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
503
  },
504
- {
505
- "function": "take_first"
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f80d069a660>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
624
  },
625
- {
626
- "function": "take_first"
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7f80d0699580>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
839
  },
840
- {
841
- "function": "take_first"
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7f80d0699800>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
963
  },
964
- {
965
- "function": "take_first"
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
  },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
- },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
  },
1028
- {
1029
- "function": "take_first"
1030
  }
1031
- ]
1032
  }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
- }
1038
- }
1039
- },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
1050
- },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
1061
- },
1062
- "model_meta": {
1063
- "truncated": 1,
1064
- "non_truncated": 14149,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 1,
1068
- "has_chat_template": false,
1069
- "chat_type": null,
1070
- "n_gpus": 1,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "869c5cafb3f5002a0d273621519e3f352418eded",
1073
- "model_dtype": "torch.bfloat16",
1074
- "model_memory_footprint": 48844259328,
1075
- "model_num_parameters": 24153690112,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 16,
1081
- "max_length": 2560,
1082
- "max_ctx_length": 2528,
1083
- "max_gen_toks": 32
1084
- },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1369.7455065359477,
1094
- "min_seq_length": 1346,
1095
- "max_seq_length": 1436,
1096
- "max_ctx_length": 2528,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1593.7455065359477,
1109
- "min_seq_length": 1570,
1110
- "max_seq_length": 1660,
1111
- "max_ctx_length": 2528,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
  },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 0,
1119
- "non_truncated": 719,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 0,
1123
- "mean_seq_length": 1719.9262865090404,
1124
- "min_seq_length": 1343,
1125
- "max_seq_length": 2520,
1126
- "max_ctx_length": 2528,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 3.0
1130
  },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 1,
1134
- "non_truncated": 1428,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 1,
1138
- "mean_seq_length": 1620.039188243527,
1139
- "min_seq_length": 1354,
1140
- "max_seq_length": 2618,
1141
- "max_ctx_length": 2528,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 2.9993002099370187
1145
  },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1594.9876923076922,
1154
- "min_seq_length": 1539,
1155
- "max_seq_length": 1715,
1156
- "max_ctx_length": 2528,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
1160
  },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1305.3878571428572,
1169
- "min_seq_length": 1282,
1170
- "max_seq_length": 1556,
1171
- "max_ctx_length": 2528,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 0,
1179
- "non_truncated": 2195,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 0,
1183
- "mean_seq_length": 1365.764464692483,
1184
- "min_seq_length": 1099,
1185
- "max_seq_length": 1868,
1186
- "max_ctx_length": 2528,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 3.0
1190
  },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 0,
1194
- "non_truncated": 851,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 0,
1198
- "mean_seq_length": 1806.3360752056403,
1199
- "min_seq_length": 1771,
1200
- "max_seq_length": 1845,
1201
- "max_ctx_length": 2528,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 25.0
1205
  },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1552.2492537313433,
1214
- "min_seq_length": 1531,
1215
- "max_seq_length": 1647,
1216
- "max_ctx_length": 2528,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
- },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=CultriX/NeuralMona_MoE-4x7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
- },
1243
- "git_hash": "51e0e5e"
1244
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.9244279910791389,
5
+ "acc,all": 0.9244281045751634,
6
+ "alias": "assin2_rte"
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.7720274719342004,
10
+ "mse,all": 0.4360906862745098,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.5340751043115438,
15
+ "acc,exam_id__UNICAMP_2018": 0.48148148148148145,
16
+ "acc,exam_id__UNICAMP_2023": 0.6046511627906976,
17
+ "acc,exam_id__USP_2023": 0.6136363636363636,
18
+ "acc,exam_id__UNICAMP_2024": 0.4888888888888889,
19
+ "acc,exam_id__USP_2024": 0.7317073170731707,
20
+ "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652,
21
+ "acc,exam_id__USP_2020": 0.5178571428571429,
22
+ "acc,exam_id__UNICAMP_2020": 0.5636363636363636,
23
+ "acc,exam_id__UNICAMP_2022": 0.5897435897435898,
24
+ "acc,exam_id__UNICAMP_2019": 0.54,
25
+ "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824,
26
+ "acc,exam_id__USP_2018": 0.5,
27
+ "acc,exam_id__USP_2021": 0.46153846153846156,
28
+ "acc,exam_id__USP_2019": 0.425,
29
+ "acc,exam_id__USP_2022": 0.46938775510204084,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.6312106368089573,
35
+ "acc,exam_id__2011": 0.6410256410256411,
36
+ "acc,exam_id__2017": 0.6379310344827587,
37
+ "acc,exam_id__2015": 0.6218487394957983,
38
+ "acc,exam_id__2016": 0.5537190082644629,
39
+ "acc,exam_id__2016_2": 0.6341463414634146,
40
+ "acc,exam_id__2009": 0.6173913043478261,
41
+ "acc,exam_id__2012": 0.6379310344827587,
42
+ "acc,exam_id__2010": 0.7008547008547008,
43
+ "acc,exam_id__2013": 0.6851851851851852,
44
+ "acc,exam_id__2014": 0.6055045871559633,
45
+ "acc,exam_id__2022": 0.6090225563909775,
46
+ "acc,exam_id__2023": 0.6370370370370371
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.7694314032342202,
50
+ "acc,all": 0.816923076923077,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.8409826856991804,
56
+ "acc,all": 0.8428571428571429
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.4214123006833713,
60
+ "acc,exam_id__2011-03": 0.3333333333333333,
61
+ "acc,exam_id__2014-13": 0.325,
62
+ "acc,exam_id__2013-10": 0.4125,
63
+ "acc,exam_id__2017-24": 0.4,
64
+ "acc,exam_id__2017-22": 0.5375,
65
+ "acc,exam_id__2012-06a": 0.375,
66
+ "acc,exam_id__2016-20a": 0.3375,
67
+ "acc,exam_id__2012-09": 0.38961038961038963,
68
+ "acc,exam_id__2015-16": 0.4125,
69
+ "acc,exam_id__2011-04": 0.4125,
70
+ "acc,exam_id__2012-07": 0.3625,
71
+ "acc,exam_id__2014-14": 0.5375,
72
+ "acc,exam_id__2014-15": 0.46153846153846156,
73
+ "acc,exam_id__2010-02": 0.44,
74
+ "acc,exam_id__2015-18": 0.4125,
75
+ "acc,exam_id__2016-19": 0.5128205128205128,
76
+ "acc,exam_id__2012-06": 0.4625,
77
+ "acc,exam_id__2013-12": 0.425,
78
+ "acc,exam_id__2011-05": 0.4875,
79
+ "acc,exam_id__2017-23": 0.3875,
80
+ "acc,exam_id__2013-11": 0.475,
81
+ "acc,exam_id__2016-20": 0.4,
82
+ "acc,exam_id__2016-21": 0.3625,
83
+ "acc,exam_id__2018-25": 0.4375,
84
+ "acc,exam_id__2010-01": 0.3764705882352941,
85
+ "acc,exam_id__2012-08": 0.4125,
86
+ "acc,exam_id__2015-17": 0.5128205128205128,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.6819724557061289,
92
+ "acc,all": 0.7132784958871915
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.6446949695921868,
96
+ "acc,all": 0.6955223880597015,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
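The hunk above is the corrected `results` block written by this commit. For orientation, the two aggregate numbers reported for each classification task, `acc,all` and `f1_macro,all`, are plain accuracy and macro-averaged F1 over the filtered generations. A minimal sketch of how such numbers can be reproduced, using scikit-learn and hypothetical toy data rather than the harness's own code:

```python
# Illustrative sketch only: reproduces "acc,all" / "f1_macro,all"-style aggregates
# with scikit-learn. The evaluation harness has its own implementation.
from sklearn.metrics import accuracy_score, f1_score

def score_task(references, predictions):
    return {
        "acc,all": accuracy_score(references, predictions),
        # macro-F1 is the unweighted mean of the per-class F1 scores
        "f1_macro,all": f1_score(references, predictions, average="macro"),
    }

# Hypothetical toy data for a binary Sim/Não task such as assin2_rte
refs  = ["Sim", "Não", "Sim", "Não"]
preds = ["Sim", "Não", "Não", "Não"]
print(score_task(refs, preds))
```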
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
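The `find_similar_label` filter configured above maps a free-form generation onto one of the allowed labels before scoring. The exact matching rule belongs to the harness; the sketch below is only one plausible reading of it, based on a fuzzy match against the first token of the output:

```python
# Sketch of a "find_similar_label"-style filter (assumption: the harness's real
# filter may use a different similarity rule). Maps a raw generation onto the
# closest allowed label via difflib on the first token of the output.
import difflib
import string

def find_similar_label(generation: str, labels: list[str]) -> str:
    stripped = generation.strip()
    if not stripped:
        return generation
    token = stripped.split()[0].strip(string.punctuation).lower()
    lowered = [label.lower() for label in labels]
    best = difflib.get_close_matches(token, lowered, n=1, cutoff=0.0)
    return labels[lowered.index(best[0])] if best else generation

print(find_similar_label("Sim, a hipótese pode ser inferida.", ["Sim", "Não"]))  # "Sim"
```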
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7f80d069a200>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
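assin2_sts is the one regression-style task: the model is asked for a similarity score between 1,0 and 5,0, and the generation is post-processed by the `number_filter` above (clip to the 1.0 to 5.0 range, fall back to 5.0 when no number is found) before `pearson` and `mse` are computed. A rough sketch of such a filter, assuming a Portuguese decimal comma is accepted; the harness's actual parsing rules may differ:

```python
# Sketch of the configured "number_filter" (assumption: the real parsing rules may
# differ). Extracts the first number from the generation, clips it to
# [range_min, range_max] ("on_outside_range": "clip"), and returns the fallback
# when nothing can be parsed.
import re

def number_filter(generation: str, range_min=1.0, range_max=5.0, fallback=5.0) -> float:
    match = re.search(r"-?\d+(?:[.,]\d+)?", generation)
    if match is None:
        return fallback
    value = float(match.group(0).replace(",", "."))
    return min(max(value, range_min), range_max)

print(number_filter("A similaridade é 4,5."))  # 4.5
print(number_filter("pontuação: 7"))           # 5.0 (clipped)
print(number_filter("não sei"))                # 5.0 (fallback)
```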
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7f80d0699bc0>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f80d0699e40>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
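For the multiple-choice tasks (bluex above, enem_challenge and oab_exams below) the filter chain is normalize_spaces, remove_accents, find_choices with the regex patterns listed, then take_first. A sketch of how those patterns extract a letter from a generation; the patterns are copied from the config, while the surrounding code is only illustrative and not the harness's own filter:

```python
# Sketch of "find_choices"-style answer extraction (assumption: the harness's real
# filter may differ). Patterns are tried in the order listed in the config; the
# first capture group found wins.
import re
import unicodedata

PATTERNS = [
    r"(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\b",
    r"\b([ABCDE])\.",
    r"\b([ABCDE]) ?[.):-]",
    r"\b([ABCDE])$",
    r"\b([ABCDE])\b",
]

def remove_accents(text: str) -> str:
    # e.g. "Opção" -> "Opcao", so the accent-free pattern "[Oo]pcao" can match
    return "".join(c for c in unicodedata.normalize("NFKD", text) if not unicodedata.combining(c))

def find_choice(generation: str) -> str | None:
    text = remove_accents(" ".join(generation.split()))  # normalize_spaces + remove_accents
    for pattern in PATTERNS:
        match = re.search(pattern, text)
        if match:
            return match.group(1)  # take_first keeps this first hit
    return None

print(find_choice("Resposta: B) porque a alternativa descreve ..."))  # "B"
```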
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7f80d069a3e0>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f80d069a660>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
518
  },
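The `"group_by": {"column": "exam_id"}` setting in these exam tasks is what produces the per-exam breakdown seen in the results block, i.e. keys like `acc,exam_id__2022` alongside `acc,all`. An illustrative sketch of that aggregation, not the harness's own code:

```python
# Sketch of group-by accuracy aggregation producing "acc,all" plus one
# "acc,exam_id__<id>" entry per exam, as in the enem/bluex/oab results above.
from collections import defaultdict

def grouped_accuracy(rows):
    """rows: iterable of (exam_id, is_correct) pairs."""
    buckets = defaultdict(list)
    for exam_id, correct in rows:
        buckets[exam_id].append(bool(correct))
    flat = [c for v in buckets.values() for c in v]
    out = {"acc,all": sum(flat) / len(flat)}
    for exam_id, v in sorted(buckets.items()):
        out[f"acc,exam_id__{exam_id}"] = sum(v) / len(v)
    return out

print(grouped_accuracy([("2022", True), ("2022", False), ("2023", True)]))
# {'acc,all': 0.666..., 'acc,exam_id__2022': 0.5, 'acc,exam_id__2023': 1.0}
```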
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7f80d0699580>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7f80d0699800>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
990
  },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
+ },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
+ },
1028
+ {
1029
+ "function": "take_first"
1030
+ }
1031
+ ]
1032
+ }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
  }
 
1038
  }
1039
  },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
1050
  },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
1061
  },
1062
+ "model_meta": {
1063
+ "truncated": 1,
1064
+ "non_truncated": 14149,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 1,
1068
+ "has_chat_template": false,
1069
+ "chat_type": null,
1070
+ "n_gpus": 1,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "869c5cafb3f5002a0d273621519e3f352418eded",
1073
+ "model_dtype": "torch.bfloat16",
1074
+ "model_memory_footprint": 48844259328,
1075
+ "model_num_parameters": 24153690112,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 16,
1081
+ "max_length": 2560,
1082
+ "max_ctx_length": 2528,
1083
+ "max_gen_toks": 32
1084
  },
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1369.7455065359477,
1094
+ "min_seq_length": 1346,
1095
+ "max_seq_length": 1436,
1096
+ "max_ctx_length": 2528,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
+ },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1593.7455065359477,
1109
+ "min_seq_length": 1570,
1110
+ "max_seq_length": 1660,
1111
+ "max_ctx_length": 2528,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
+ },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 0,
1119
+ "non_truncated": 719,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 0,
1123
+ "mean_seq_length": 1719.9262865090404,
1124
+ "min_seq_length": 1343,
1125
+ "max_seq_length": 2520,
1126
+ "max_ctx_length": 2528,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 3.0
1130
+ },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 1,
1134
+ "non_truncated": 1428,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 1,
1138
+ "mean_seq_length": 1620.039188243527,
1139
+ "min_seq_length": 1354,
1140
+ "max_seq_length": 2618,
1141
+ "max_ctx_length": 2528,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 2.9993002099370187
1145
+ },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1594.9876923076922,
1154
+ "min_seq_length": 1539,
1155
+ "max_seq_length": 1715,
1156
+ "max_ctx_length": 2528,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1305.3878571428572,
1169
+ "min_seq_length": 1282,
1170
+ "max_seq_length": 1556,
1171
+ "max_ctx_length": 2528,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 0,
1179
+ "non_truncated": 2195,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 0,
1183
+ "mean_seq_length": 1365.764464692483,
1184
+ "min_seq_length": 1099,
1185
+ "max_seq_length": 1868,
1186
+ "max_ctx_length": 2528,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 3.0
1190
+ },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 0,
1194
+ "non_truncated": 851,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 0,
1198
+ "mean_seq_length": 1806.3360752056403,
1199
+ "min_seq_length": 1771,
1200
+ "max_seq_length": 1845,
1201
+ "max_ctx_length": 2528,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 25.0
1205
+ },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1552.2492537313433,
1214
+ "min_seq_length": 1531,
1215
+ "max_seq_length": 1647,
1216
+ "max_ctx_length": 2528,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
  },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=CultriX/NeuralMona_MoE-4x7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
  },
1243
+ "git_hash": "51e0e5e"
1244
  }
CultriX/NeuralMona_MoE-4x7B/results_2024-05-26T13-29-26.736769.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.6732290307389868,
38
- "all_grouped_npm": 0.5184879950941655,
39
  "all_grouped": {
40
  "enem_challenge": 0.6312106368089573,
41
  "bluex": 0.5340751043115438,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.7694314032342202,
46
  "hatebr_offensive": 0.8409826856991804,
47
  "portuguese_hate_speech": 0.6819724557061289,
48
- "tweetsentbr": 0.4835212271941399
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6312106368089573,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7694314032342202,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8409826856991804,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6819724557061289,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.4835212271941399
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6312106368089573,
@@ -150,9 +150,9 @@
150
  "main_score": 0.6819724557061289
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.4835212271941399,
154
  "acc,all": 0.6955223880597015,
155
- "main_score": 0.4835212271941399
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.6911372243387697,
38
+ "all_grouped_npm": 0.5451370927128902,
39
  "all_grouped": {
40
  "enem_challenge": 0.6312106368089573,
41
  "bluex": 0.5340751043115438,
 
45
  "faquad_nli": 0.7694314032342202,
46
  "hatebr_offensive": 0.8409826856991804,
47
  "portuguese_hate_speech": 0.6819724557061289,
48
+ "tweetsentbr": 0.6446949695921868
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6312106368089573,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7694314032342202,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8409826856991804,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6819724557061289,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.6446949695921868
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6312106368089573,
 
150
  "main_score": 0.6819724557061289
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.6446949695921868,
154
  "acc,all": 0.6955223880597015,
155
+ "main_score": 0.6446949695921868
156
  }
157
  },
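The changed lines in this summary file are the tweetsentbr main score and the two grouped aggregates derived from it. Two consistency checks can be run directly against the values shown in the diff. The old score being exactly 3/4 of the corrected one suggests (this reading of the bug is an assumption) that macro-F1 was previously averaged over one class more than the three sentiment labels that actually occur. The three per-task scores not visible in this hunk are taken from the corrected raw results.json earlier in the diff:

```python
# Consistency checks against the values shown in this commit (illustrative).
scores = {
    "enem_challenge": 0.6312106368089573,
    "bluex": 0.5340751043115438,
    "oab_exams": 0.4214123006833713,
    "assin2_rte": 0.9244279910791389,
    "assin2_sts": 0.7720274719342004,   # pearson is the main score here
    "faquad_nli": 0.7694314032342202,
    "hatebr_offensive": 0.8409826856991804,
    "portuguese_hate_speech": 0.6819724557061289,
    "tweetsentbr": 0.6446949695921868,  # corrected value
}

# The new all_grouped_average is the plain mean of the nine main scores.
assert abs(sum(scores.values()) / len(scores) - 0.6911372243387697) < 1e-12

# The old tweetsentbr score equals 3/4 of the corrected one (to float precision).
assert abs(0.6446949695921868 * 3 / 4 - 0.4835212271941399) < 1e-12
```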
158
  "config_tasks": {
Danielbrdz/Barcenas-14b-Phi-3-medium-ORPO/raw_2024-08-08T02-43-35.640819/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.9259642329554806,
5
- "acc,all": 0.926062091503268,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.714480317302389,
10
- "mse,all": 0.9154340392156861,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.6578581363004172,
15
- "acc,exam_id__USP_2018": 0.5370370370370371,
16
- "acc,exam_id__USP_2020": 0.6785714285714286,
17
- "acc,exam_id__UNICAMP_2020": 0.6909090909090909,
18
- "acc,exam_id__UNICAMP_2019": 0.7,
19
- "acc,exam_id__UNICAMP_2022": 0.7948717948717948,
20
- "acc,exam_id__USP_2021": 0.6538461538461539,
21
- "acc,exam_id__USP_2019": 0.625,
22
- "acc,exam_id__USP_2022": 0.673469387755102,
23
- "acc,exam_id__UNICAMP_2024": 0.6222222222222222,
24
- "acc,exam_id__USP_2023": 0.75,
25
- "acc,exam_id__UNICAMP_2018": 0.5370370370370371,
26
- "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305,
27
- "acc,exam_id__UNICAMP_2023": 0.7209302325581395,
28
- "acc,exam_id__USP_2024": 0.8048780487804879,
29
- "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.7319804058782365,
35
- "acc,exam_id__2016_2": 0.7317073170731707,
36
- "acc,exam_id__2016": 0.6859504132231405,
37
- "acc,exam_id__2014": 0.7614678899082569,
38
- "acc,exam_id__2011": 0.8205128205128205,
39
- "acc,exam_id__2015": 0.7142857142857143,
40
- "acc,exam_id__2012": 0.7413793103448276,
41
- "acc,exam_id__2013": 0.6481481481481481,
42
- "acc,exam_id__2010": 0.7435897435897436,
43
- "acc,exam_id__2017": 0.7672413793103449,
44
- "acc,exam_id__2009": 0.7043478260869566,
45
- "acc,exam_id__2023": 0.7777777777777778,
46
- "acc,exam_id__2022": 0.6842105263157895
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.6906170752324599,
50
- "acc,all": 0.7184615384615385,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.8460180802244769,
56
- "acc,all": 0.8478571428571429
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.510250569476082,
60
- "acc,exam_id__2013-10": 0.525,
61
- "acc,exam_id__2012-09": 0.45454545454545453,
62
- "acc,exam_id__2010-02": 0.58,
63
- "acc,exam_id__2015-18": 0.5,
64
- "acc,exam_id__2018-25": 0.425,
65
- "acc,exam_id__2015-17": 0.5769230769230769,
66
- "acc,exam_id__2011-03": 0.47474747474747475,
67
- "acc,exam_id__2016-20": 0.5125,
68
- "acc,exam_id__2011-04": 0.4125,
69
- "acc,exam_id__2012-06a": 0.5625,
70
- "acc,exam_id__2012-08": 0.5,
71
- "acc,exam_id__2011-05": 0.5625,
72
- "acc,exam_id__2014-15": 0.5512820512820513,
73
- "acc,exam_id__2012-06": 0.525,
74
- "acc,exam_id__2013-12": 0.5625,
75
- "acc,exam_id__2010-01": 0.38823529411764707,
76
- "acc,exam_id__2012-07": 0.575,
77
- "acc,exam_id__2016-19": 0.5128205128205128,
78
- "acc,exam_id__2016-21": 0.4375,
79
- "acc,exam_id__2017-23": 0.4875,
80
- "acc,exam_id__2017-24": 0.55,
81
- "acc,exam_id__2016-20a": 0.45,
82
- "acc,exam_id__2015-16": 0.55,
83
- "acc,exam_id__2013-11": 0.475,
84
- "acc,exam_id__2017-22": 0.55,
85
- "acc,exam_id__2014-14": 0.525,
86
- "acc,exam_id__2014-13": 0.55,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.7355214633181597,
92
- "acc,all": 0.7802585193889542
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.5025508930131336,
96
- "acc,all": 0.7069651741293532,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
208
  },
209
- {
210
- "function": "take_first"
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7f39df64a8e0>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7f39df64a2a0>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
414
  },
415
- {
416
- "function": "take_first"
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f39df64a520>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7f39df64aac0>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
503
  },
504
- {
505
- "function": "take_first"
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f39df64ad40>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
624
  },
625
- {
626
- "function": "take_first"
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7f39df649c60>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
839
  },
840
- {
841
- "function": "take_first"
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7f39df649ee0>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
963
  },
964
- {
965
- "function": "take_first"
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
  },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
- },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
  },
1028
- {
1029
- "function": "take_first"
1030
  }
1031
- ]
1032
  }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
- }
1038
- }
1039
- },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
1050
- },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
1061
- },
1062
- "model_meta": {
1063
- "truncated": 0,
1064
- "non_truncated": 14150,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 0,
1068
- "has_chat_template": true,
1069
- "chat_type": "system_user_assistant",
1070
- "n_gpus": 1,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "b749dbcb19901b8fd0e9f38c923a24533569f895",
1073
- "model_dtype": "torch.float16",
1074
- "model_memory_footprint": 27920486400,
1075
- "model_num_parameters": 13960238080,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 16,
1081
- "max_length": 2560,
1082
- "max_ctx_length": 2528,
1083
- "max_gen_toks": 32
1084
- },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1268.9889705882354,
1094
- "min_seq_length": 1246,
1095
- "max_seq_length": 1335,
1096
- "max_ctx_length": 2528,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1499.9889705882354,
1109
- "min_seq_length": 1477,
1110
- "max_seq_length": 1566,
1111
- "max_ctx_length": 2528,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
  },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 0,
1119
- "non_truncated": 719,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 0,
1123
- "mean_seq_length": 1609.7426981919332,
1124
- "min_seq_length": 1243,
1125
- "max_seq_length": 2369,
1126
- "max_ctx_length": 2528,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 3.0
1130
  },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 0,
1134
- "non_truncated": 1429,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 0,
1138
- "mean_seq_length": 1488.9881035689293,
1139
- "min_seq_length": 1236,
1140
- "max_seq_length": 2528,
1141
- "max_ctx_length": 2528,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 3.0
1145
  },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1478.1184615384616,
1154
- "min_seq_length": 1426,
1155
- "max_seq_length": 1585,
1156
- "max_ctx_length": 2528,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
1160
  },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1262.9178571428572,
1169
- "min_seq_length": 1239,
1170
- "max_seq_length": 1509,
1171
- "max_ctx_length": 2528,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 0,
1179
- "non_truncated": 2195,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 0,
1183
- "mean_seq_length": 1258.4145785876992,
1184
- "min_seq_length": 1003,
1185
- "max_seq_length": 1740,
1186
- "max_ctx_length": 2528,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 3.0
1190
  },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 0,
1194
- "non_truncated": 851,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 0,
1198
- "mean_seq_length": 1751.801410105758,
1199
- "min_seq_length": 1717,
1200
- "max_seq_length": 1795,
1201
- "max_ctx_length": 2528,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 25.0
1205
  },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1518.6845771144278,
1214
- "min_seq_length": 1497,
1215
- "max_seq_length": 1636,
1216
- "max_ctx_length": 2528,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
- },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=Danielbrdz/Barcenas-14b-Phi-3-medium-ORPO,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
- },
1243
- "git_hash": "5a13f3e"
1244
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.9259642329554806,
5
+ "acc,all": 0.926062091503268,
6
+ "alias": "assin2_rte"
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.714480317302389,
10
+ "mse,all": 0.9154340392156861,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.6578581363004172,
15
+ "acc,exam_id__USP_2018": 0.5370370370370371,
16
+ "acc,exam_id__USP_2020": 0.6785714285714286,
17
+ "acc,exam_id__UNICAMP_2020": 0.6909090909090909,
18
+ "acc,exam_id__UNICAMP_2019": 0.7,
19
+ "acc,exam_id__UNICAMP_2022": 0.7948717948717948,
20
+ "acc,exam_id__USP_2021": 0.6538461538461539,
21
+ "acc,exam_id__USP_2019": 0.625,
22
+ "acc,exam_id__USP_2022": 0.673469387755102,
23
+ "acc,exam_id__UNICAMP_2024": 0.6222222222222222,
24
+ "acc,exam_id__USP_2023": 0.75,
25
+ "acc,exam_id__UNICAMP_2018": 0.5370370370370371,
26
+ "acc,exam_id__UNICAMP_2021_1": 0.5869565217391305,
27
+ "acc,exam_id__UNICAMP_2023": 0.7209302325581395,
28
+ "acc,exam_id__USP_2024": 0.8048780487804879,
29
+ "acc,exam_id__UNICAMP_2021_2": 0.5686274509803921,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.7319804058782365,
35
+ "acc,exam_id__2016_2": 0.7317073170731707,
36
+ "acc,exam_id__2016": 0.6859504132231405,
37
+ "acc,exam_id__2014": 0.7614678899082569,
38
+ "acc,exam_id__2011": 0.8205128205128205,
39
+ "acc,exam_id__2015": 0.7142857142857143,
40
+ "acc,exam_id__2012": 0.7413793103448276,
41
+ "acc,exam_id__2013": 0.6481481481481481,
42
+ "acc,exam_id__2010": 0.7435897435897436,
43
+ "acc,exam_id__2017": 0.7672413793103449,
44
+ "acc,exam_id__2009": 0.7043478260869566,
45
+ "acc,exam_id__2023": 0.7777777777777778,
46
+ "acc,exam_id__2022": 0.6842105263157895
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.6906170752324599,
50
+ "acc,all": 0.7184615384615385,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.8460180802244769,
56
+ "acc,all": 0.8478571428571429
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.510250569476082,
60
+ "acc,exam_id__2013-10": 0.525,
61
+ "acc,exam_id__2012-09": 0.45454545454545453,
62
+ "acc,exam_id__2010-02": 0.58,
63
+ "acc,exam_id__2015-18": 0.5,
64
+ "acc,exam_id__2018-25": 0.425,
65
+ "acc,exam_id__2015-17": 0.5769230769230769,
66
+ "acc,exam_id__2011-03": 0.47474747474747475,
67
+ "acc,exam_id__2016-20": 0.5125,
68
+ "acc,exam_id__2011-04": 0.4125,
69
+ "acc,exam_id__2012-06a": 0.5625,
70
+ "acc,exam_id__2012-08": 0.5,
71
+ "acc,exam_id__2011-05": 0.5625,
72
+ "acc,exam_id__2014-15": 0.5512820512820513,
73
+ "acc,exam_id__2012-06": 0.525,
74
+ "acc,exam_id__2013-12": 0.5625,
75
+ "acc,exam_id__2010-01": 0.38823529411764707,
76
+ "acc,exam_id__2012-07": 0.575,
77
+ "acc,exam_id__2016-19": 0.5128205128205128,
78
+ "acc,exam_id__2016-21": 0.4375,
79
+ "acc,exam_id__2017-23": 0.4875,
80
+ "acc,exam_id__2017-24": 0.55,
81
+ "acc,exam_id__2016-20a": 0.45,
82
+ "acc,exam_id__2015-16": 0.55,
83
+ "acc,exam_id__2013-11": 0.475,
84
+ "acc,exam_id__2017-22": 0.55,
85
+ "acc,exam_id__2014-14": 0.525,
86
+ "acc,exam_id__2014-13": 0.55,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.7355214633181597,
92
+ "acc,all": 0.7802585193889542
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.6700678573508446,
96
+ "acc,all": 0.7069651741293532,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7f39df64a8e0>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7f39df64a2a0>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f39df64a520>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7f39df64aac0>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f39df64ad40>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7f39df649c60>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7f39df649ee0>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
990
  },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
+ },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
+ },
1028
+ {
1029
+ "function": "take_first"
1030
+ }
1031
+ ]
1032
+ }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
  }
 
1038
  }
1039
  },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
1050
  },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
1061
  },
1062
+ "model_meta": {
1063
+ "truncated": 0,
1064
+ "non_truncated": 14150,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 0,
1068
+ "has_chat_template": true,
1069
+ "chat_type": "system_user_assistant",
1070
+ "n_gpus": 1,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "b749dbcb19901b8fd0e9f38c923a24533569f895",
1073
+ "model_dtype": "torch.float16",
1074
+ "model_memory_footprint": 27920486400,
1075
+ "model_num_parameters": 13960238080,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 16,
1081
+ "max_length": 2560,
1082
+ "max_ctx_length": 2528,
1083
+ "max_gen_toks": 32
1084
  },
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1268.9889705882354,
1094
+ "min_seq_length": 1246,
1095
+ "max_seq_length": 1335,
1096
+ "max_ctx_length": 2528,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
+ },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1499.9889705882354,
1109
+ "min_seq_length": 1477,
1110
+ "max_seq_length": 1566,
1111
+ "max_ctx_length": 2528,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
+ },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 0,
1119
+ "non_truncated": 719,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 0,
1123
+ "mean_seq_length": 1609.7426981919332,
1124
+ "min_seq_length": 1243,
1125
+ "max_seq_length": 2369,
1126
+ "max_ctx_length": 2528,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 3.0
1130
+ },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 0,
1134
+ "non_truncated": 1429,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 0,
1138
+ "mean_seq_length": 1488.9881035689293,
1139
+ "min_seq_length": 1236,
1140
+ "max_seq_length": 2528,
1141
+ "max_ctx_length": 2528,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 3.0
1145
+ },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1478.1184615384616,
1154
+ "min_seq_length": 1426,
1155
+ "max_seq_length": 1585,
1156
+ "max_ctx_length": 2528,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1262.9178571428572,
1169
+ "min_seq_length": 1239,
1170
+ "max_seq_length": 1509,
1171
+ "max_ctx_length": 2528,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 0,
1179
+ "non_truncated": 2195,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 0,
1183
+ "mean_seq_length": 1258.4145785876992,
1184
+ "min_seq_length": 1003,
1185
+ "max_seq_length": 1740,
1186
+ "max_ctx_length": 2528,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 3.0
1190
+ },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 0,
1194
+ "non_truncated": 851,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 0,
1198
+ "mean_seq_length": 1751.801410105758,
1199
+ "min_seq_length": 1717,
1200
+ "max_seq_length": 1795,
1201
+ "max_ctx_length": 2528,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 25.0
1205
+ },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1518.6845771144278,
1214
+ "min_seq_length": 1497,
1215
+ "max_seq_length": 1636,
1216
+ "max_ctx_length": 2528,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
  },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=Danielbrdz/Barcenas-14b-Phi-3-medium-ORPO,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
  },
1243
+ "git_hash": "5a13f3e"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1244
  }
Danielbrdz/Barcenas-14b-Phi-3-medium-ORPO/results_2024-08-08T02-43-35.640819.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.7016934637445373,
38
- "all_grouped_npm": 0.5569268035545414,
39
  "all_grouped": {
40
  "enem_challenge": 0.7319804058782365,
41
  "bluex": 0.6578581363004172,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.6906170752324599,
46
  "hatebr_offensive": 0.8460180802244769,
47
  "portuguese_hate_speech": 0.7355214633181597,
48
- "tweetsentbr": 0.5025508930131336
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.7319804058782365,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.6906170752324599,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8460180802244769,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7355214633181597,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.5025508930131336
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.7319804058782365,
@@ -150,9 +150,9 @@
150
  "main_score": 0.7355214633181597
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.5025508930131336,
154
  "acc,all": 0.7069651741293532,
155
- "main_score": 0.5025508930131336
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.7203064597820608,
38
+ "all_grouped_npm": 0.5846247143246655,
39
  "all_grouped": {
40
  "enem_challenge": 0.7319804058782365,
41
  "bluex": 0.6578581363004172,
 
45
  "faquad_nli": 0.6906170752324599,
46
  "hatebr_offensive": 0.8460180802244769,
47
  "portuguese_hate_speech": 0.7355214633181597,
48
+ "tweetsentbr": 0.6700678573508446
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.7319804058782365,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.6906170752324599,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8460180802244769,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7355214633181597,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.6700678573508446
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.7319804058782365,
 
150
  "main_score": 0.7355214633181597
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.6700678573508446,
154
  "acc,all": 0.7069651741293532,
155
+ "main_score": 0.6700678573508446
156
  }
157
  },
158
  "config_tasks": {
Danielbrdz/Barcenas-Llama3-8b-ORPO/raw_2024-05-18T00-12-52.690138/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.9178150146340144,
5
- "acc,all": 0.9178921568627451,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.7260402501200387,
10
- "mse,all": 0.6636315359477125,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.5827538247566064,
15
- "acc,exam_id__UNICAMP_2024": 0.7111111111111111,
16
- "acc,exam_id__USP_2019": 0.6,
17
- "acc,exam_id__UNICAMP_2019": 0.6,
18
- "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824,
19
- "acc,exam_id__USP_2021": 0.5961538461538461,
20
- "acc,exam_id__UNICAMP_2020": 0.5272727272727272,
21
- "acc,exam_id__UNICAMP_2022": 0.6923076923076923,
22
- "acc,exam_id__USP_2018": 0.5,
23
- "acc,exam_id__USP_2022": 0.5510204081632653,
24
- "acc,exam_id__UNICAMP_2018": 0.42592592592592593,
25
- "acc,exam_id__UNICAMP_2023": 0.6511627906976745,
26
- "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652,
27
- "acc,exam_id__USP_2020": 0.5535714285714286,
28
- "acc,exam_id__USP_2024": 0.6829268292682927,
29
- "acc,exam_id__USP_2023": 0.6818181818181818,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.7102869139258222,
35
- "acc,exam_id__2012": 0.7155172413793104,
36
- "acc,exam_id__2017": 0.6896551724137931,
37
- "acc,exam_id__2013": 0.6851851851851852,
38
- "acc,exam_id__2016": 0.7024793388429752,
39
- "acc,exam_id__2011": 0.7264957264957265,
40
- "acc,exam_id__2015": 0.7310924369747899,
41
- "acc,exam_id__2022": 0.6766917293233082,
42
- "acc,exam_id__2014": 0.7155963302752294,
43
- "acc,exam_id__2010": 0.7435897435897436,
44
- "acc,exam_id__2009": 0.7304347826086957,
45
- "acc,exam_id__2016_2": 0.6504065040650406,
46
- "acc,exam_id__2023": 0.7555555555555555
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.7308849598805747,
50
- "acc,all": 0.7815384615384615,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.8698828946051447,
56
- "acc,all": 0.87
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.508883826879271,
60
- "acc,exam_id__2014-15": 0.6282051282051282,
61
- "acc,exam_id__2012-07": 0.4625,
62
- "acc,exam_id__2016-20a": 0.4375,
63
- "acc,exam_id__2015-16": 0.5,
64
- "acc,exam_id__2016-21": 0.4,
65
- "acc,exam_id__2013-10": 0.475,
66
- "acc,exam_id__2014-13": 0.425,
67
- "acc,exam_id__2010-02": 0.52,
68
- "acc,exam_id__2012-06": 0.5125,
69
- "acc,exam_id__2018-25": 0.5125,
70
- "acc,exam_id__2011-04": 0.5125,
71
- "acc,exam_id__2012-08": 0.5125,
72
- "acc,exam_id__2015-18": 0.5,
73
- "acc,exam_id__2011-05": 0.4625,
74
- "acc,exam_id__2012-09": 0.4935064935064935,
75
- "acc,exam_id__2017-24": 0.4625,
76
- "acc,exam_id__2012-06a": 0.5375,
77
- "acc,exam_id__2016-20": 0.5625,
78
- "acc,exam_id__2013-12": 0.575,
79
- "acc,exam_id__2016-19": 0.5128205128205128,
80
- "acc,exam_id__2014-14": 0.575,
81
- "acc,exam_id__2017-22": 0.6,
82
- "acc,exam_id__2010-01": 0.4,
83
- "acc,exam_id__2011-03": 0.48484848484848486,
84
- "acc,exam_id__2015-17": 0.6410256410256411,
85
- "acc,exam_id__2017-23": 0.525,
86
- "acc,exam_id__2013-11": 0.525,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.5958643988009942,
92
- "acc,all": 0.5969447708578144
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.4996436497852127,
96
- "acc,all": 0.7203980099502487,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
208
  },
209
- {
210
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7f4883d942c0>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7f4883f5bc40>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
 
 
414
  },
415
- {
416
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f4883f5bec0>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7f4883d944a0>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
 
 
503
  },
504
- {
505
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f4883d94720>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
624
  },
625
- {
626
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7f4883f5b600>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
 
 
 
839
  },
840
- {
841
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7f4883f5b880>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
963
  },
964
- {
965
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
  },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
- },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
  },
1028
- {
1029
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1030
  }
1031
- ]
1032
  }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
- }
1038
- }
1039
- },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
1050
- },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
1061
- },
1062
- "model_meta": {
1063
- "truncated": 0,
1064
- "non_truncated": 14150,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 0,
1068
- "has_chat_template": true,
1069
- "chat_type": "system_user_assistant",
1070
- "n_gpus": 1,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "66c848c4526d3db1ec41468c0f73ac4448c6abe9",
1073
- "model_dtype": "torch.float16",
1074
- "model_memory_footprint": 16194748416,
1075
- "model_num_parameters": 8030261248,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 8,
1081
- "max_length": 2560,
1082
- "max_ctx_length": 2528,
1083
- "max_gen_toks": 32
1084
- },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1318.5322712418301,
1094
- "min_seq_length": 1299,
1095
- "max_seq_length": 1382,
1096
- "max_ctx_length": 2528,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1509.5322712418301,
1109
- "min_seq_length": 1490,
1110
- "max_seq_length": 1573,
1111
- "max_ctx_length": 2528,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
  },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 0,
1119
- "non_truncated": 719,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 0,
1123
- "mean_seq_length": 1484.7719054242002,
1124
- "min_seq_length": 1165,
1125
- "max_seq_length": 2134,
1126
- "max_ctx_length": 2528,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 3.0
1130
  },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 0,
1134
- "non_truncated": 1429,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 0,
1138
- "mean_seq_length": 1412.3547935619315,
1139
- "min_seq_length": 1187,
1140
- "max_seq_length": 2340,
1141
- "max_ctx_length": 2528,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 3.0
1145
  },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1447.8215384615385,
1154
- "min_seq_length": 1402,
1155
- "max_seq_length": 1544,
1156
- "max_ctx_length": 2528,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
 
 
 
 
 
 
 
 
1160
  },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1279.3878571428572,
1169
- "min_seq_length": 1259,
1170
- "max_seq_length": 1498,
1171
- "max_ctx_length": 2528,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 0,
1179
- "non_truncated": 2195,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 0,
1183
- "mean_seq_length": 1220.3772209567198,
1184
- "min_seq_length": 988,
1185
- "max_seq_length": 1654,
1186
- "max_ctx_length": 2528,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 3.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1190
  },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 0,
1194
- "non_truncated": 851,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 0,
1198
- "mean_seq_length": 1676.4195064629848,
1199
- "min_seq_length": 1646,
1200
- "max_seq_length": 1708,
1201
- "max_ctx_length": 2528,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 25.0
 
 
 
 
 
 
1205
  },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1537.1537313432837,
1214
- "min_seq_length": 1520,
1215
- "max_seq_length": 1585,
1216
- "max_ctx_length": 2528,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
- },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=Danielbrdz/Barcenas-Llama3-8b-ORPO,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
- },
1243
- "git_hash": "51e0e5e"
1244
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.9178150146340144,
5
+ "acc,all": 0.9178921568627451,
6
+ "alias": "assin2_rte"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.7260402501200387,
10
+ "mse,all": 0.6636315359477125,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.5827538247566064,
15
+ "acc,exam_id__UNICAMP_2024": 0.7111111111111111,
16
+ "acc,exam_id__USP_2019": 0.6,
17
+ "acc,exam_id__UNICAMP_2019": 0.6,
18
+ "acc,exam_id__UNICAMP_2021_2": 0.5294117647058824,
19
+ "acc,exam_id__USP_2021": 0.5961538461538461,
20
+ "acc,exam_id__UNICAMP_2020": 0.5272727272727272,
21
+ "acc,exam_id__UNICAMP_2022": 0.6923076923076923,
22
+ "acc,exam_id__USP_2018": 0.5,
23
+ "acc,exam_id__USP_2022": 0.5510204081632653,
24
+ "acc,exam_id__UNICAMP_2018": 0.42592592592592593,
25
+ "acc,exam_id__UNICAMP_2023": 0.6511627906976745,
26
+ "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652,
27
+ "acc,exam_id__USP_2020": 0.5535714285714286,
28
+ "acc,exam_id__USP_2024": 0.6829268292682927,
29
+ "acc,exam_id__USP_2023": 0.6818181818181818,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.7102869139258222,
35
+ "acc,exam_id__2012": 0.7155172413793104,
36
+ "acc,exam_id__2017": 0.6896551724137931,
37
+ "acc,exam_id__2013": 0.6851851851851852,
38
+ "acc,exam_id__2016": 0.7024793388429752,
39
+ "acc,exam_id__2011": 0.7264957264957265,
40
+ "acc,exam_id__2015": 0.7310924369747899,
41
+ "acc,exam_id__2022": 0.6766917293233082,
42
+ "acc,exam_id__2014": 0.7155963302752294,
43
+ "acc,exam_id__2010": 0.7435897435897436,
44
+ "acc,exam_id__2009": 0.7304347826086957,
45
+ "acc,exam_id__2016_2": 0.6504065040650406,
46
+ "acc,exam_id__2023": 0.7555555555555555
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.7308849598805747,
50
+ "acc,all": 0.7815384615384615,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.8698828946051447,
56
+ "acc,all": 0.87
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.508883826879271,
60
+ "acc,exam_id__2014-15": 0.6282051282051282,
61
+ "acc,exam_id__2012-07": 0.4625,
62
+ "acc,exam_id__2016-20a": 0.4375,
63
+ "acc,exam_id__2015-16": 0.5,
64
+ "acc,exam_id__2016-21": 0.4,
65
+ "acc,exam_id__2013-10": 0.475,
66
+ "acc,exam_id__2014-13": 0.425,
67
+ "acc,exam_id__2010-02": 0.52,
68
+ "acc,exam_id__2012-06": 0.5125,
69
+ "acc,exam_id__2018-25": 0.5125,
70
+ "acc,exam_id__2011-04": 0.5125,
71
+ "acc,exam_id__2012-08": 0.5125,
72
+ "acc,exam_id__2015-18": 0.5,
73
+ "acc,exam_id__2011-05": 0.4625,
74
+ "acc,exam_id__2012-09": 0.4935064935064935,
75
+ "acc,exam_id__2017-24": 0.4625,
76
+ "acc,exam_id__2012-06a": 0.5375,
77
+ "acc,exam_id__2016-20": 0.5625,
78
+ "acc,exam_id__2013-12": 0.575,
79
+ "acc,exam_id__2016-19": 0.5128205128205128,
80
+ "acc,exam_id__2014-14": 0.575,
81
+ "acc,exam_id__2017-22": 0.6,
82
+ "acc,exam_id__2010-01": 0.4,
83
+ "acc,exam_id__2011-03": 0.48484848484848486,
84
+ "acc,exam_id__2015-17": 0.6410256410256411,
85
+ "acc,exam_id__2017-23": 0.525,
86
+ "acc,exam_id__2013-11": 0.525,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.5958643988009942,
92
+ "acc,all": 0.5969447708578144
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.6661915330469502,
96
+ "acc,all": 0.7203980099502487,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7f4883d942c0>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7f4883f5bc40>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f4883f5bec0>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7f4883d944a0>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
 
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f4883d94720>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7f4883f5b600>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7f4883f5b880>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
 
 
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
 
 
990
  },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
+ },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
+ },
1028
+ {
1029
+ "function": "take_first"
1030
+ }
1031
+ ]
1032
+ }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
  }
 
1038
  }
 
 
1039
  },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
 
 
1050
  },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
 
 
1061
  },
1062
+ "model_meta": {
1063
+ "truncated": 0,
1064
+ "non_truncated": 14150,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 0,
1068
+ "has_chat_template": true,
1069
+ "chat_type": "system_user_assistant",
1070
+ "n_gpus": 1,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "66c848c4526d3db1ec41468c0f73ac4448c6abe9",
1073
+ "model_dtype": "torch.float16",
1074
+ "model_memory_footprint": 16194748416,
1075
+ "model_num_parameters": 8030261248,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 8,
1081
+ "max_length": 2560,
1082
+ "max_ctx_length": 2528,
1083
+ "max_gen_toks": 32
1084
  },
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1318.5322712418301,
1094
+ "min_seq_length": 1299,
1095
+ "max_seq_length": 1382,
1096
+ "max_ctx_length": 2528,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
+ },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1509.5322712418301,
1109
+ "min_seq_length": 1490,
1110
+ "max_seq_length": 1573,
1111
+ "max_ctx_length": 2528,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
+ },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 0,
1119
+ "non_truncated": 719,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 0,
1123
+ "mean_seq_length": 1484.7719054242002,
1124
+ "min_seq_length": 1165,
1125
+ "max_seq_length": 2134,
1126
+ "max_ctx_length": 2528,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 3.0
1130
+ },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 0,
1134
+ "non_truncated": 1429,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 0,
1138
+ "mean_seq_length": 1412.3547935619315,
1139
+ "min_seq_length": 1187,
1140
+ "max_seq_length": 2340,
1141
+ "max_ctx_length": 2528,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 3.0
1145
+ },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1447.8215384615385,
1154
+ "min_seq_length": 1402,
1155
+ "max_seq_length": 1544,
1156
+ "max_ctx_length": 2528,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1279.3878571428572,
1169
+ "min_seq_length": 1259,
1170
+ "max_seq_length": 1498,
1171
+ "max_ctx_length": 2528,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 0,
1179
+ "non_truncated": 2195,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 0,
1183
+ "mean_seq_length": 1220.3772209567198,
1184
+ "min_seq_length": 988,
1185
+ "max_seq_length": 1654,
1186
+ "max_ctx_length": 2528,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 3.0
1190
+ },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 0,
1194
+ "non_truncated": 851,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 0,
1198
+ "mean_seq_length": 1676.4195064629848,
1199
+ "min_seq_length": 1646,
1200
+ "max_seq_length": 1708,
1201
+ "max_ctx_length": 2528,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 25.0
1205
+ },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1537.1537313432837,
1214
+ "min_seq_length": 1520,
1215
+ "max_seq_length": 1585,
1216
+ "max_ctx_length": 2528,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
  },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=Danielbrdz/Barcenas-Llama3-8b-ORPO,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
  },
1243
+ "git_hash": "51e0e5e"
 
 
1244
  }
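The oab_exams, bluex and enem_challenge configs above all post-process the raw generation with the same filter chain: normalize_spaces, then remove_accents, then find_choices over an ordered list of regex patterns, then take_first. Below is a minimal Python sketch of that extraction step, reusing the A-D patterns from the oab_exams config above; the helper functions are illustrative assumptions, not the harness's own code.

```python
import re
import unicodedata

# Regex patterns copied from the oab_exams "find_choices" filter above, tried in order.
PATTERNS = [
    r"(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\b",
    r"\b([ABCD])\.",
    r"\b([ABCD]) ?[.):-]",
    r"\b([ABCD])$",
    r"\b([ABCD])\b",
]

def normalize_spaces(text):
    # Collapse runs of whitespace into single spaces.
    return " ".join(text.split())

def remove_accents(text):
    # Strip combining marks so accented words match the accent-free patterns.
    return "".join(c for c in unicodedata.normalize("NFKD", text) if not unicodedata.combining(c))

def find_choice(generation):
    # Apply the chain and return the captured letter from the first pattern that matches.
    text = remove_accents(normalize_spaces(generation))
    for pattern in PATTERNS:
        match = re.search(pattern, text)
        if match:
            return match.group(1)
    return None

print(find_choice("A resposta correta é a letra C."))  # prints "C"
```

In this sketch the most explicit patterns are tried first, so a verbose answer such as "Resposta: C" resolves to a single letter before accuracy is computed over the extracted choices.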
Danielbrdz/Barcenas-Llama3-8b-ORPO/results_2024-05-18T00-12-52.690138.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.6824506370430754,
38
- "all_grouped_npm": 0.525680465043327,
39
  "all_grouped": {
40
  "enem_challenge": 0.7102869139258222,
41
  "bluex": 0.5827538247566064,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.7308849598805747,
46
  "hatebr_offensive": 0.8698828946051447,
47
  "portuguese_hate_speech": 0.5958643988009942,
48
- "tweetsentbr": 0.4996436497852127
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.7102869139258222,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7308849598805747,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8698828946051447,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.5958643988009942,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.4996436497852127
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.7102869139258222,
@@ -150,9 +150,9 @@
150
  "main_score": 0.5958643988009942
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.4996436497852127,
154
  "acc,all": 0.7203980099502487,
155
- "main_score": 0.4996436497852127
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.7009559574054907,
38
+ "all_grouped_npm": 0.553218144154064,
39
  "all_grouped": {
40
  "enem_challenge": 0.7102869139258222,
41
  "bluex": 0.5827538247566064,
 
45
  "faquad_nli": 0.7308849598805747,
46
  "hatebr_offensive": 0.8698828946051447,
47
  "portuguese_hate_speech": 0.5958643988009942,
48
+ "tweetsentbr": 0.6661915330469502
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.7102869139258222,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7308849598805747,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8698828946051447,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.5958643988009942,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.6661915330469502
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.7102869139258222,
 
150
  "main_score": 0.5958643988009942
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.6661915330469502,
154
  "acc,all": 0.7203980099502487,
155
+ "main_score": 0.6661915330469502
156
  }
157
  },
158
  "config_tasks": {
DeepMount00/Llama-3-8b-Ita/raw_2024-05-19T23-04-56.757278/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.9174373136272109,
5
- "acc,all": 0.9174836601307189,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.7274385305362817,
10
- "mse,all": 0.7363439542483661,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.588317107093185,
15
- "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131,
16
- "acc,exam_id__USP_2024": 0.6829268292682927,
17
- "acc,exam_id__UNICAMP_2019": 0.62,
18
- "acc,exam_id__UNICAMP_2023": 0.627906976744186,
19
- "acc,exam_id__USP_2018": 0.4444444444444444,
20
- "acc,exam_id__USP_2023": 0.6818181818181818,
21
- "acc,exam_id__UNICAMP_2018": 0.4444444444444444,
22
- "acc,exam_id__USP_2021": 0.5961538461538461,
23
- "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373,
24
- "acc,exam_id__UNICAMP_2024": 0.6444444444444445,
25
- "acc,exam_id__USP_2019": 0.625,
26
- "acc,exam_id__USP_2020": 0.5714285714285714,
27
- "acc,exam_id__UNICAMP_2022": 0.717948717948718,
28
- "acc,exam_id__UNICAMP_2020": 0.5818181818181818,
29
- "acc,exam_id__USP_2022": 0.5306122448979592,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.7116864940517844,
35
- "acc,exam_id__2014": 0.7247706422018348,
36
- "acc,exam_id__2017": 0.7068965517241379,
37
- "acc,exam_id__2023": 0.762962962962963,
38
- "acc,exam_id__2012": 0.7327586206896551,
39
- "acc,exam_id__2022": 0.6616541353383458,
40
- "acc,exam_id__2011": 0.7350427350427351,
41
- "acc,exam_id__2016_2": 0.6585365853658537,
42
- "acc,exam_id__2009": 0.7478260869565218,
43
- "acc,exam_id__2010": 0.7094017094017094,
44
- "acc,exam_id__2016": 0.6942148760330579,
45
- "acc,exam_id__2015": 0.7142857142857143,
46
- "acc,exam_id__2013": 0.6944444444444444
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.7530594601709288,
50
- "acc,all": 0.8046153846153846,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.8684530363041023,
56
- "acc,all": 0.8685714285714285
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.5084282460136674,
60
- "acc,exam_id__2011-05": 0.45,
61
- "acc,exam_id__2015-16": 0.4625,
62
- "acc,exam_id__2011-03": 0.5050505050505051,
63
- "acc,exam_id__2012-09": 0.5324675324675324,
64
- "acc,exam_id__2016-20": 0.575,
65
- "acc,exam_id__2014-15": 0.5641025641025641,
66
- "acc,exam_id__2013-12": 0.55,
67
- "acc,exam_id__2012-06": 0.5125,
68
- "acc,exam_id__2016-19": 0.5256410256410257,
69
- "acc,exam_id__2016-21": 0.3875,
70
- "acc,exam_id__2014-13": 0.425,
71
- "acc,exam_id__2013-10": 0.4375,
72
- "acc,exam_id__2015-18": 0.5125,
73
- "acc,exam_id__2018-25": 0.525,
74
- "acc,exam_id__2017-23": 0.5125,
75
- "acc,exam_id__2017-22": 0.575,
76
- "acc,exam_id__2017-24": 0.475,
77
- "acc,exam_id__2010-01": 0.3764705882352941,
78
- "acc,exam_id__2014-14": 0.625,
79
- "acc,exam_id__2012-06a": 0.55,
80
- "acc,exam_id__2012-08": 0.5125,
81
- "acc,exam_id__2016-20a": 0.45,
82
- "acc,exam_id__2012-07": 0.475,
83
- "acc,exam_id__2010-02": 0.55,
84
- "acc,exam_id__2015-17": 0.6410256410256411,
85
- "acc,exam_id__2011-04": 0.5125,
86
- "acc,exam_id__2013-11": 0.5125,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.6102181878747501,
92
- "acc,all": 0.6122209165687427
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.5047785208818167,
96
- "acc,all": 0.7223880597014926,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
 
 
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
 
 
 
 
208
  },
209
- {
210
- "function": "take_first"
 
 
 
 
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7f986e5c04a0>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
 
 
 
 
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7f986e583b00>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
 
 
 
 
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
 
 
414
  },
415
- {
416
- "function": "take_first"
 
 
 
 
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f986e583d80>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7f986e583560>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
 
 
503
  },
504
- {
505
- "function": "take_first"
 
 
 
 
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f986e583880>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
 
 
 
 
 
624
  },
625
- {
626
- "function": "take_first"
 
 
 
 
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
 
 
 
 
 
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7f986e5831a0>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
 
 
 
 
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
 
 
 
839
  },
840
- {
841
- "function": "take_first"
 
 
 
 
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7f986e583420>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
 
 
 
 
963
  },
964
- {
965
- "function": "take_first"
 
 
 
 
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
  },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
- },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
  },
1028
- {
1029
- "function": "take_first"
 
 
 
 
1030
  }
1031
- ]
1032
  }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
- }
1038
- }
1039
- },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
1050
- },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
1061
- },
1062
- "model_meta": {
1063
- "truncated": 0,
1064
- "non_truncated": 14150,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 0,
1068
- "has_chat_template": true,
1069
- "chat_type": "system_user_assistant",
1070
- "n_gpus": 1,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "c399bd706c749788d260ed5f47c3c5c3190f37d9",
1073
- "model_dtype": "torch.bfloat16",
1074
- "model_memory_footprint": 16194748416,
1075
- "model_num_parameters": 8030261248,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 4,
1081
- "max_length": 2560,
1082
- "max_ctx_length": 2528,
1083
- "max_gen_toks": 32
1084
- },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1318.5322712418301,
1094
- "min_seq_length": 1299,
1095
- "max_seq_length": 1382,
1096
- "max_ctx_length": 2528,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1509.5322712418301,
1109
- "min_seq_length": 1490,
1110
- "max_seq_length": 1573,
1111
- "max_ctx_length": 2528,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
  },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 0,
1119
- "non_truncated": 719,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 0,
1123
- "mean_seq_length": 1484.7719054242002,
1124
- "min_seq_length": 1165,
1125
- "max_seq_length": 2134,
1126
- "max_ctx_length": 2528,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 3.0
1130
  },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 0,
1134
- "non_truncated": 1429,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 0,
1138
- "mean_seq_length": 1412.3547935619315,
1139
- "min_seq_length": 1187,
1140
- "max_seq_length": 2340,
1141
- "max_ctx_length": 2528,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 3.0
1145
  },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1447.8215384615385,
1154
- "min_seq_length": 1402,
1155
- "max_seq_length": 1544,
1156
- "max_ctx_length": 2528,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
 
 
 
 
1160
  },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1279.3878571428572,
1169
- "min_seq_length": 1259,
1170
- "max_seq_length": 1498,
1171
- "max_ctx_length": 2528,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 0,
1179
- "non_truncated": 2195,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 0,
1183
- "mean_seq_length": 1220.3772209567198,
1184
- "min_seq_length": 988,
1185
- "max_seq_length": 1654,
1186
- "max_ctx_length": 2528,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 3.0
 
 
 
 
1190
  },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 0,
1194
- "non_truncated": 851,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 0,
1198
- "mean_seq_length": 1676.4195064629848,
1199
- "min_seq_length": 1646,
1200
- "max_seq_length": 1708,
1201
- "max_ctx_length": 2528,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 25.0
 
 
 
 
1205
  },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1537.1537313432837,
1214
- "min_seq_length": 1520,
1215
- "max_seq_length": 1585,
1216
- "max_ctx_length": 2528,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
- },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=DeepMount00/Llama-3-8b-Ita,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
- },
1243
- "git_hash": "51e0e5e"
1244
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.9174373136272109,
5
+ "acc,all": 0.9174836601307189,
6
+ "alias": "assin2_rte"
 
 
 
 
 
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.7274385305362817,
10
+ "mse,all": 0.7363439542483661,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.588317107093185,
15
+ "acc,exam_id__UNICAMP_2021_1": 0.6086956521739131,
16
+ "acc,exam_id__USP_2024": 0.6829268292682927,
17
+ "acc,exam_id__UNICAMP_2019": 0.62,
18
+ "acc,exam_id__UNICAMP_2023": 0.627906976744186,
19
+ "acc,exam_id__USP_2018": 0.4444444444444444,
20
+ "acc,exam_id__USP_2023": 0.6818181818181818,
21
+ "acc,exam_id__UNICAMP_2018": 0.4444444444444444,
22
+ "acc,exam_id__USP_2021": 0.5961538461538461,
23
+ "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373,
24
+ "acc,exam_id__UNICAMP_2024": 0.6444444444444445,
25
+ "acc,exam_id__USP_2019": 0.625,
26
+ "acc,exam_id__USP_2020": 0.5714285714285714,
27
+ "acc,exam_id__UNICAMP_2022": 0.717948717948718,
28
+ "acc,exam_id__UNICAMP_2020": 0.5818181818181818,
29
+ "acc,exam_id__USP_2022": 0.5306122448979592,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.7116864940517844,
35
+ "acc,exam_id__2014": 0.7247706422018348,
36
+ "acc,exam_id__2017": 0.7068965517241379,
37
+ "acc,exam_id__2023": 0.762962962962963,
38
+ "acc,exam_id__2012": 0.7327586206896551,
39
+ "acc,exam_id__2022": 0.6616541353383458,
40
+ "acc,exam_id__2011": 0.7350427350427351,
41
+ "acc,exam_id__2016_2": 0.6585365853658537,
42
+ "acc,exam_id__2009": 0.7478260869565218,
43
+ "acc,exam_id__2010": 0.7094017094017094,
44
+ "acc,exam_id__2016": 0.6942148760330579,
45
+ "acc,exam_id__2015": 0.7142857142857143,
46
+ "acc,exam_id__2013": 0.6944444444444444
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.7530594601709288,
50
+ "acc,all": 0.8046153846153846,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.8684530363041023,
56
+ "acc,all": 0.8685714285714285
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.5084282460136674,
60
+ "acc,exam_id__2011-05": 0.45,
61
+ "acc,exam_id__2015-16": 0.4625,
62
+ "acc,exam_id__2011-03": 0.5050505050505051,
63
+ "acc,exam_id__2012-09": 0.5324675324675324,
64
+ "acc,exam_id__2016-20": 0.575,
65
+ "acc,exam_id__2014-15": 0.5641025641025641,
66
+ "acc,exam_id__2013-12": 0.55,
67
+ "acc,exam_id__2012-06": 0.5125,
68
+ "acc,exam_id__2016-19": 0.5256410256410257,
69
+ "acc,exam_id__2016-21": 0.3875,
70
+ "acc,exam_id__2014-13": 0.425,
71
+ "acc,exam_id__2013-10": 0.4375,
72
+ "acc,exam_id__2015-18": 0.5125,
73
+ "acc,exam_id__2018-25": 0.525,
74
+ "acc,exam_id__2017-23": 0.5125,
75
+ "acc,exam_id__2017-22": 0.575,
76
+ "acc,exam_id__2017-24": 0.475,
77
+ "acc,exam_id__2010-01": 0.3764705882352941,
78
+ "acc,exam_id__2014-14": 0.625,
79
+ "acc,exam_id__2012-06a": 0.55,
80
+ "acc,exam_id__2012-08": 0.5125,
81
+ "acc,exam_id__2016-20a": 0.45,
82
+ "acc,exam_id__2012-07": 0.475,
83
+ "acc,exam_id__2010-02": 0.55,
84
+ "acc,exam_id__2015-17": 0.6410256410256411,
85
+ "acc,exam_id__2011-04": 0.5125,
86
+ "acc,exam_id__2013-11": 0.5125,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.6102181878747501,
92
+ "acc,all": 0.6122209165687427
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.6730380278424222,
96
+ "acc,all": 0.7223880597014926,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
 
 
 
 
 
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7f986e5c04a0>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
 
 
 
 
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7f986e583b00>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f986e583d80>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7f986e583560>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
 
 
 
 
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f986e583880>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
 
 
 
 
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
 
 
 
 
635
  },
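faquad_nli (and the other Sim/Não tasks below) map the generation onto one of two labels with a find_similar_label filter before scoring f1_macro and acc. A rough sketch of that idea, assuming a simple string-similarity match on the first generated word; the real filter's matching rule is not shown in this dump.

```python
from difflib import SequenceMatcher

LABELS = ["Sim", "Não"]

def find_similar_label(generation: str, labels=LABELS) -> str:
    # Illustrative assumption: compare only the first word of the generation
    # against each allowed label and keep the most similar one.
    first_word = generation.strip().split()[0] if generation.strip() else ""
    return max(labels, key=lambda lab: SequenceMatcher(None, first_word.lower(), lab.lower()).ratio())

print(find_similar_label("Sim, a resposta está correta."))  # -> "Sim"
print(find_similar_label("nao"))                            # -> "Não"
```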
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
 
 
 
 
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
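Several tasks above select their few-shot examples with an id_sampler: a fixed id_list looked up in the named id_column, optionally excluded from the scored documents. A self-contained sketch of that behaviour, using a plain list of dicts rather than the harness's actual dataset objects:

```python
def id_sampler(dataset, id_list, id_column, exclude_from_task=False):
    # Index rows by their id so the few-shot order follows id_list exactly.
    by_id = {row[id_column]: row for row in dataset}
    fewshot = [by_id[doc_id] for doc_id in id_list]
    if exclude_from_task:
        # Keep the selected shots out of the scored test documents.
        remaining = [row for row in dataset if row[id_column] not in set(id_list)]
    else:
        remaining = dataset
    return fewshot, remaining

# Toy data shaped like the hatebr_offensive rows (id column "idx").
dataset = [{"idx": i, "sentence": f"comentario {i}", "label": i % 2} for i in range(100)]
shots, docs = id_sampler(dataset, id_list=[48, 44, 36], id_column="idx")
print([s["idx"] for s in shots])  # -> [48, 44, 36]
```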
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7f986e5831a0>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7f986e583420>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
 
 
 
 
854
  },
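The exam-style tasks (bluex, enem_challenge, oab_exams) additionally group their accuracy by exam_id, which is why the results blocks elsewhere in this diff report both an overall acc,all and one acc,exam_id__* entry per exam. A small sketch of that aggregation over assumed records of the form {"exam_id": ..., "correct": ...}:

```python
from collections import defaultdict

def grouped_accuracy(records):
    # Overall accuracy plus one accuracy per exam_id group.
    per_exam = defaultdict(list)
    for rec in records:
        per_exam[rec["exam_id"]].append(rec["correct"])
    report = {"acc,all": sum(r["correct"] for r in records) / len(records)}
    for exam_id, hits in per_exam.items():
        report[f"acc,exam_id__{exam_id}"] = sum(hits) / len(hits)
    return report

print(grouped_accuracy([
    {"exam_id": "2010-01", "correct": True},
    {"exam_id": "2010-01", "correct": False},
    {"exam_id": "2010-02", "correct": True},
]))
# -> {'acc,all': 0.666..., 'acc,exam_id__2010-01': 0.5, 'acc,exam_id__2010-02': 1.0}
```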
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
 
 
 
 
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
 
 
 
 
990
  },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
+ },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
+ },
1028
+ {
1029
+ "function": "take_first"
1030
+ }
1031
+ ]
1032
+ }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
  }
 
1038
  }
 
 
 
 
1039
  },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
 
 
1050
  },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
 
 
1061
  },
1062
+ "model_meta": {
1063
+ "truncated": 0,
1064
+ "non_truncated": 14150,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 0,
1068
+ "has_chat_template": true,
1069
+ "chat_type": "system_user_assistant",
1070
+ "n_gpus": 1,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "c399bd706c749788d260ed5f47c3c5c3190f37d9",
1073
+ "model_dtype": "torch.bfloat16",
1074
+ "model_memory_footprint": 16194748416,
1075
+ "model_num_parameters": 8030261248,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 4,
1081
+ "max_length": 2560,
1082
+ "max_ctx_length": 2528,
1083
+ "max_gen_toks": 32
1084
  },
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1318.5322712418301,
1094
+ "min_seq_length": 1299,
1095
+ "max_seq_length": 1382,
1096
+ "max_ctx_length": 2528,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
+ },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1509.5322712418301,
1109
+ "min_seq_length": 1490,
1110
+ "max_seq_length": 1573,
1111
+ "max_ctx_length": 2528,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
+ },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 0,
1119
+ "non_truncated": 719,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 0,
1123
+ "mean_seq_length": 1484.7719054242002,
1124
+ "min_seq_length": 1165,
1125
+ "max_seq_length": 2134,
1126
+ "max_ctx_length": 2528,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 3.0
1130
+ },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 0,
1134
+ "non_truncated": 1429,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 0,
1138
+ "mean_seq_length": 1412.3547935619315,
1139
+ "min_seq_length": 1187,
1140
+ "max_seq_length": 2340,
1141
+ "max_ctx_length": 2528,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 3.0
1145
+ },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1447.8215384615385,
1154
+ "min_seq_length": 1402,
1155
+ "max_seq_length": 1544,
1156
+ "max_ctx_length": 2528,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1279.3878571428572,
1169
+ "min_seq_length": 1259,
1170
+ "max_seq_length": 1498,
1171
+ "max_ctx_length": 2528,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 0,
1179
+ "non_truncated": 2195,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 0,
1183
+ "mean_seq_length": 1220.3772209567198,
1184
+ "min_seq_length": 988,
1185
+ "max_seq_length": 1654,
1186
+ "max_ctx_length": 2528,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 3.0
1190
+ },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 0,
1194
+ "non_truncated": 851,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 0,
1198
+ "mean_seq_length": 1676.4195064629848,
1199
+ "min_seq_length": 1646,
1200
+ "max_seq_length": 1708,
1201
+ "max_ctx_length": 2528,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 25.0
1205
+ },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1537.1537313432837,
1214
+ "min_seq_length": 1520,
1215
+ "max_seq_length": 1585,
1216
+ "max_ctx_length": 2528,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
  },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=DeepMount00/Llama-3-8b-Ita,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
  },
1243
+ "git_hash": "51e0e5e"
 
 
 
 
1244
  }
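For the classification tasks, the main score is macro-F1 over the filtered labels. One common way such a score can shift without the predictions changing is whether every class in the task's label set is included in the averaging, even classes the model never produced; the JSON here does not say whether that is the exact change behind the revised numbers in the results file that follows, so the snippet below only illustrates the general effect, with scikit-learn as an assumed dependency.

```python
from sklearn.metrics import f1_score

gold = ["Positivo", "Negativo", "Neutro", "Positivo", "Negativo"]
pred = ["Positivo", "Negativo", "Positivo", "Positivo", "Negativo"]  # "Neutro" never predicted

# Averaging only over labels seen in the predictions vs. over the full label set.
seen_only = f1_score(gold, pred, labels=["Positivo", "Negativo"], average="macro", zero_division=0)
full_set = f1_score(gold, pred, labels=["Positivo", "Negativo", "Neutro"], average="macro", zero_division=0)
print(round(seen_only, 3), round(full_set, 3))  # -> 0.9 0.6
```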
DeepMount00/Llama-3-8b-Ita/results_2024-05-19T23-04-56.757278.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.6877574329504141,
38
- "all_grouped_npm": 0.5347979327978014,
39
  "all_grouped": {
40
  "enem_challenge": 0.7116864940517844,
41
  "bluex": 0.588317107093185,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.7530594601709288,
46
  "hatebr_offensive": 0.8684530363041023,
47
  "portuguese_hate_speech": 0.6102181878747501,
48
- "tweetsentbr": 0.5047785208818167
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.7116864940517844,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7530594601709288,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8684530363041023,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6102181878747501,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.5047785208818167
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.7116864940517844,
@@ -150,9 +150,9 @@
150
  "main_score": 0.6102181878747501
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.5047785208818167,
154
  "acc,all": 0.7223880597014926,
155
- "main_score": 0.5047785208818167
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.7064529337238148,
38
+ "all_grouped_npm": 0.5626186184725045,
39
  "all_grouped": {
40
  "enem_challenge": 0.7116864940517844,
41
  "bluex": 0.588317107093185,
 
45
  "faquad_nli": 0.7530594601709288,
46
  "hatebr_offensive": 0.8684530363041023,
47
  "portuguese_hate_speech": 0.6102181878747501,
48
+ "tweetsentbr": 0.6730380278424222
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.7116864940517844,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7530594601709288,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8684530363041023,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6102181878747501,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.6730380278424222
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.7116864940517844,
 
150
  "main_score": 0.6102181878747501
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.6730380278424222,
154
  "acc,all": 0.7223880597014926,
155
+ "main_score": 0.6730380278424222
156
  }
157
  },
158
  "config_tasks": {
EleutherAI/pythia-14m/raw_2024-04-03T19-47-56.339960/results.json CHANGED
@@ -1,1324 +1,1324 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.2210516588115701,
5
- "acc,all": 0.48856209150326796,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.0006847937896062521,
10
- "mse,all": 1.8791258169934641,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.17941585535465926,
15
- "acc,exam_id__USP_2021": 0.19230769230769232,
16
- "acc,exam_id__USP_2018": 0.12962962962962962,
17
- "acc,exam_id__UNICAMP_2023": 0.27906976744186046,
18
- "acc,exam_id__USP_2024": 0.14634146341463414,
19
- "acc,exam_id__USP_2022": 0.1836734693877551,
20
- "acc,exam_id__UNICAMP_2019": 0.14,
21
- "acc,exam_id__USP_2020": 0.17857142857142858,
22
- "acc,exam_id__UNICAMP_2021_1": 0.2608695652173913,
23
- "acc,exam_id__USP_2019": 0.225,
24
- "acc,exam_id__UNICAMP_2022": 0.23076923076923078,
25
- "acc,exam_id__UNICAMP_2024": 0.15555555555555556,
26
- "acc,exam_id__UNICAMP_2021_2": 0.11764705882352941,
27
- "acc,exam_id__UNICAMP_2018": 0.2222222222222222,
28
- "acc,exam_id__USP_2023": 0.09090909090909091,
29
- "acc,exam_id__UNICAMP_2020": 0.16363636363636364,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.19104268719384185,
35
- "acc,exam_id__2013": 0.16666666666666666,
36
- "acc,exam_id__2012": 0.19827586206896552,
37
- "acc,exam_id__2015": 0.14285714285714285,
38
- "acc,exam_id__2016": 0.18181818181818182,
39
- "acc,exam_id__2009": 0.16521739130434782,
40
- "acc,exam_id__2023": 0.25925925925925924,
41
- "acc,exam_id__2016_2": 0.1951219512195122,
42
- "acc,exam_id__2010": 0.1623931623931624,
43
- "acc,exam_id__2014": 0.1834862385321101,
44
- "acc,exam_id__2022": 0.21804511278195488,
45
- "acc,exam_id__2011": 0.19658119658119658,
46
- "acc,exam_id__2017": 0.20689655172413793
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.4396551724137931,
50
- "acc,all": 0.7846153846153846,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.17328604471858133,
56
- "acc,all": 0.23142857142857143
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.21822323462414578,
60
- "acc,exam_id__2011-03": 0.23232323232323232,
61
- "acc,exam_id__2011-04": 0.25,
62
- "acc,exam_id__2011-05": 0.2375,
63
- "acc,exam_id__2016-19": 0.1794871794871795,
64
- "acc,exam_id__2017-23": 0.2,
65
- "acc,exam_id__2018-25": 0.25,
66
- "acc,exam_id__2012-09": 0.23376623376623376,
67
- "acc,exam_id__2017-24": 0.2125,
68
- "acc,exam_id__2014-14": 0.2625,
69
- "acc,exam_id__2015-17": 0.23076923076923078,
70
- "acc,exam_id__2012-07": 0.1,
71
- "acc,exam_id__2016-20": 0.2,
72
- "acc,exam_id__2013-11": 0.1625,
73
- "acc,exam_id__2016-21": 0.2125,
74
- "acc,exam_id__2012-06a": 0.2125,
75
- "acc,exam_id__2015-18": 0.25,
76
- "acc,exam_id__2012-08": 0.2125,
77
- "acc,exam_id__2013-12": 0.175,
78
- "acc,exam_id__2012-06": 0.225,
79
- "acc,exam_id__2015-16": 0.225,
80
- "acc,exam_id__2013-10": 0.2,
81
- "acc,exam_id__2014-13": 0.225,
82
- "acc,exam_id__2010-02": 0.21,
83
- "acc,exam_id__2014-15": 0.20512820512820512,
84
- "acc,exam_id__2016-20a": 0.2875,
85
- "acc,exam_id__2010-01": 0.25882352941176473,
86
- "acc,exam_id__2017-22": 0.2375,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.2692126355492692,
92
- "acc,all": 0.6709753231492362
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.008390382047306943,
96
- "acc,all": 0.004975124378109453,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
 
 
 
 
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
 
 
 
 
208
  },
209
- {
210
- "function": "take_first"
 
 
 
 
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7fc263b9be20>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
 
 
 
 
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7fc263b9b7e0>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
 
 
 
 
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
 
 
414
  },
415
- {
416
- "function": "take_first"
 
 
 
 
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fc263b9ba60>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7fc263be4040>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
 
 
503
  },
504
- {
505
- "function": "take_first"
 
 
 
 
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fc263be42c0>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
 
 
 
 
624
  },
625
- {
626
- "function": "take_first"
 
 
 
 
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
 
 
 
 
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7fc263b9b1a0>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
 
 
 
 
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
 
 
 
839
  },
840
- {
841
- "function": "take_first"
 
 
 
 
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7fc263b9b420>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
 
 
 
963
  },
964
- {
965
- "function": "take_first"
 
 
 
 
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia-temp/tweetsentbr",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "id_sampler",
990
- "sampler_config": {
991
- "id_list": [
992
- "862006098672459776",
993
- "861612241703063552",
994
- "861833257087848448",
995
- "861283345476571138",
996
- "861283000335695873",
997
- "862139461274152962",
998
- "862139468702265344",
999
- "862006107702734848",
1000
- "862004354458537984",
1001
- "861833322925883392",
1002
- "861603063190171648",
1003
- "862139462716989440",
1004
- "862005877355810818",
1005
- "861751885862244353",
1006
- "862045180261695489",
1007
- "862004252499226630",
1008
- "862023970828292097",
1009
- "862041752127107074",
1010
- "862034961863503872",
1011
- "861293756548608001",
1012
- "861993527575695360",
1013
- "862003099355021315",
1014
- "862002404086206467",
1015
- "861282989602463744",
1016
- "862139454399668229",
1017
- "862139463769743361",
1018
- "862054906689138688",
1019
- "862139446535360513",
1020
- "861997363744911361",
1021
- "862057988898648065",
1022
- "861329080083521536",
1023
- "861286289034838016",
1024
- "861833050526806017",
1025
- "861300658565255169",
1026
- "861989003821813760",
1027
- "861682750398631938",
1028
- "861283275716907008",
1029
- "861283402523267072",
1030
- "861873108147466240",
1031
- "862139462138171392",
1032
- "861284090271715333",
1033
- "862139446149427201",
1034
- "861629109331525633",
1035
- "861721698609098753",
1036
- "862139453124612096",
1037
- "861283339482914816",
1038
- "861282466291748867",
1039
- "862055346759749632",
1040
- "862003019860389891",
1041
- "862140698346344449",
1042
- "862084376280092672",
1043
- "862003058708017152",
1044
- "862000677345787904",
1045
- "862029129310502913",
1046
- "862005822376882178",
1047
- "861969836297134085",
1048
- "861302955361927168",
1049
- "862064949451005953",
1050
- "861282589541355520",
1051
- "862005476858486784",
1052
- "862004684411850757",
1053
- "862139471101349890",
1054
- "862139467146170368",
1055
- "862139475098558465",
1056
- "862140706550403072",
1057
- "861282777001537536",
1058
- "862003184147079169",
1059
- "861283410656059394",
1060
- "861283417857691649",
1061
- "861888778922856448",
1062
- "861655860812099585",
1063
- "861834248063504384",
1064
- "862005210935382017",
1065
- "861282716930760704",
1066
- "861287082433622022"
1067
- ],
1068
- "id_column": "id"
1069
- }
1070
- },
1071
- "num_fewshot": 25,
1072
- "metric_list": [
1073
- {
1074
- "metric": "f1_macro",
1075
- "aggregation": "f1_macro",
1076
- "higher_is_better": true
1077
  },
1078
- {
1079
- "metric": "acc",
1080
- "aggregation": "acc",
1081
- "higher_is_better": true
1082
- }
1083
- ],
1084
- "output_type": "generate_until",
1085
- "generation_kwargs": {
1086
- "max_gen_toks": 32,
1087
- "do_sample": false,
1088
- "temperature": 0.0,
1089
- "top_k": null,
1090
- "top_p": null,
1091
- "until": [
1092
- "\n\n"
1093
- ]
1094
- },
1095
- "repeats": 1,
1096
- "filter_list": [
1097
- {
1098
- "name": "all",
1099
- "filter": [
1100
- {
1101
- "function": "find_similar_label",
1102
- "labels": [
1103
- "Positivo",
1104
- "Neutro",
1105
- "Negativo"
1106
- ]
 
 
 
 
1107
  },
1108
- {
1109
- "function": "take_first"
 
 
 
 
1110
  }
1111
- ]
1112
  }
1113
- ],
1114
- "should_decontaminate": false,
1115
- "metadata": {
1116
- "version": 1.0
1117
- }
1118
- }
1119
- },
1120
- "versions": {
1121
- "assin2_rte": 1.1,
1122
- "assin2_sts": 1.1,
1123
- "bluex": 1.1,
1124
- "enem_challenge": 1.1,
1125
- "faquad_nli": 1.1,
1126
- "hatebr_offensive": 1.0,
1127
- "oab_exams": 1.5,
1128
- "portuguese_hate_speech": 1.0,
1129
- "tweetsentbr": 1.0
1130
- },
1131
- "n-shot": {
1132
- "assin2_rte": 15,
1133
- "assin2_sts": 15,
1134
- "bluex": 3,
1135
- "enem_challenge": 3,
1136
- "faquad_nli": 15,
1137
- "hatebr_offensive": 25,
1138
- "oab_exams": 3,
1139
- "portuguese_hate_speech": 25,
1140
- "tweetsentbr": 25
1141
- },
1142
- "model_meta": {
1143
- "truncated": 42,
1144
- "non_truncated": 14108,
1145
- "padded": 0,
1146
- "non_padded": 14150,
1147
- "fewshots_truncated": 56,
1148
- "has_chat_template": false,
1149
- "chat_type": null,
1150
- "n_gpus": 1,
1151
- "accelerate_num_process": null,
1152
- "model_sha": "f33025648652797a390d8c54835273845b437161",
1153
- "model_dtype": "torch.float16",
1154
- "model_memory_footprint": 54087788,
1155
- "model_num_parameters": 14067712,
1156
- "model_is_loaded_in_4bit": null,
1157
- "model_is_loaded_in_8bit": null,
1158
- "model_is_quantized": null,
1159
- "model_device": "cuda:0",
1160
- "batch_size": 32,
1161
- "max_length": 2048,
1162
- "max_ctx_length": 2016,
1163
- "max_gen_toks": 32
1164
- },
1165
- "task_model_meta": {
1166
- "assin2_rte": {
1167
- "sample_size": 2448,
1168
- "truncated": 0,
1169
- "non_truncated": 2448,
1170
- "padded": 0,
1171
- "non_padded": 2448,
1172
- "fewshots_truncated": 0,
1173
- "mean_seq_length": 1378.0061274509803,
1174
- "min_seq_length": 1355,
1175
- "max_seq_length": 1444,
1176
- "max_ctx_length": 2016,
1177
- "max_gen_toks": 32,
1178
- "mean_original_fewshots_size": 15.0,
1179
- "mean_effective_fewshot_size": 15.0
1180
- },
1181
- "assin2_sts": {
1182
- "sample_size": 2448,
1183
- "truncated": 0,
1184
- "non_truncated": 2448,
1185
- "padded": 0,
1186
- "non_padded": 2448,
1187
- "fewshots_truncated": 0,
1188
- "mean_seq_length": 1514.0061274509803,
1189
- "min_seq_length": 1491,
1190
- "max_seq_length": 1580,
1191
- "max_ctx_length": 2016,
1192
- "max_gen_toks": 32,
1193
- "mean_original_fewshots_size": 15.0,
1194
- "mean_effective_fewshot_size": 15.0
1195
- },
1196
- "bluex": {
1197
- "sample_size": 719,
1198
- "truncated": 26,
1199
- "non_truncated": 693,
1200
- "padded": 0,
1201
- "non_padded": 719,
1202
- "fewshots_truncated": 39,
1203
- "mean_seq_length": 1656.076495132128,
1204
- "min_seq_length": 1285,
1205
- "max_seq_length": 2440,
1206
- "max_ctx_length": 2016,
1207
- "max_gen_toks": 32,
1208
- "mean_original_fewshots_size": 3.0,
1209
- "mean_effective_fewshot_size": 2.945757997218359
1210
  },
1211
- "enem_challenge": {
1212
- "sample_size": 1429,
1213
- "truncated": 16,
1214
- "non_truncated": 1413,
1215
- "padded": 0,
1216
- "non_padded": 1429,
1217
- "fewshots_truncated": 17,
1218
- "mean_seq_length": 1559.0517844646606,
1219
- "min_seq_length": 1308,
1220
- "max_seq_length": 2520,
1221
- "max_ctx_length": 2016,
1222
- "max_gen_toks": 32,
1223
- "mean_original_fewshots_size": 3.0,
1224
- "mean_effective_fewshot_size": 2.988103568929321
1225
  },
1226
- "faquad_nli": {
1227
- "sample_size": 650,
1228
- "truncated": 0,
1229
- "non_truncated": 650,
1230
- "padded": 0,
1231
- "non_padded": 650,
1232
- "fewshots_truncated": 0,
1233
- "mean_seq_length": 1578.8153846153846,
1234
- "min_seq_length": 1525,
1235
- "max_seq_length": 1688,
1236
- "max_ctx_length": 2016,
1237
- "max_gen_toks": 32,
1238
- "mean_original_fewshots_size": 15.0,
1239
- "mean_effective_fewshot_size": 15.0
1240
  },
1241
- "hatebr_offensive": {
1242
- "sample_size": 1400,
1243
- "truncated": 0,
1244
- "non_truncated": 1400,
1245
- "padded": 0,
1246
- "non_padded": 1400,
1247
- "fewshots_truncated": 0,
1248
- "mean_seq_length": 1292.5114285714285,
1249
- "min_seq_length": 1269,
1250
- "max_seq_length": 1535,
1251
- "max_ctx_length": 2016,
1252
- "max_gen_toks": 32,
1253
- "mean_original_fewshots_size": 25.0,
1254
- "mean_effective_fewshot_size": 25.0
 
 
 
 
1255
  },
1256
- "oab_exams": {
1257
- "sample_size": 2195,
1258
- "truncated": 0,
1259
- "non_truncated": 2195,
1260
- "padded": 0,
1261
- "non_padded": 2195,
1262
- "fewshots_truncated": 0,
1263
- "mean_seq_length": 1340.5503416856493,
1264
- "min_seq_length": 1077,
1265
- "max_seq_length": 1805,
1266
- "max_ctx_length": 2016,
1267
- "max_gen_toks": 32,
1268
- "mean_original_fewshots_size": 3.0,
1269
- "mean_effective_fewshot_size": 3.0
 
 
 
 
1270
  },
1271
- "portuguese_hate_speech": {
1272
- "sample_size": 851,
1273
- "truncated": 0,
1274
- "non_truncated": 851,
1275
- "padded": 0,
1276
- "non_padded": 851,
1277
- "fewshots_truncated": 0,
1278
- "mean_seq_length": 1775.5558166862515,
1279
- "min_seq_length": 1741,
1280
- "max_seq_length": 1812,
1281
- "max_ctx_length": 2016,
1282
- "max_gen_toks": 32,
1283
- "mean_original_fewshots_size": 25.0,
1284
- "mean_effective_fewshot_size": 25.0
 
 
1285
  },
1286
- "tweetsentbr": {
1287
- "sample_size": 2010,
1288
- "truncated": 0,
1289
- "non_truncated": 2010,
1290
- "padded": 0,
1291
- "non_padded": 2010,
1292
- "fewshots_truncated": 0,
1293
- "mean_seq_length": 1495.2800995024875,
1294
- "min_seq_length": 1475,
1295
- "max_seq_length": 1554,
1296
- "max_ctx_length": 2016,
1297
- "max_gen_toks": 32,
1298
- "mean_original_fewshots_size": 25.0,
1299
- "mean_effective_fewshot_size": 25.0
1300
- }
1301
- },
1302
- "config": {
1303
- "model": "huggingface",
1304
- "model_args": "pretrained=EleutherAI/pythia-14m,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1305
- "batch_size": "auto",
1306
- "batch_sizes": [],
1307
- "device": null,
1308
- "use_cache": null,
1309
- "limit": [
1310
- null,
1311
- null,
1312
- null,
1313
- null,
1314
- null,
1315
- null,
1316
- null,
1317
- null,
1318
- null
1319
- ],
1320
- "bootstrap_iters": 0,
1321
- "gen_kwargs": null
1322
- },
1323
- "git_hash": null
1324
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.33157748821735517,
5
+ "acc,all": 0.48856209150326796,
6
+ "alias": "assin2_rte"
 
 
 
 
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.0006847937896062521,
10
+ "mse,all": 1.8791258169934641,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.17941585535465926,
15
+ "acc,exam_id__USP_2021": 0.19230769230769232,
16
+ "acc,exam_id__USP_2018": 0.12962962962962962,
17
+ "acc,exam_id__UNICAMP_2023": 0.27906976744186046,
18
+ "acc,exam_id__USP_2024": 0.14634146341463414,
19
+ "acc,exam_id__USP_2022": 0.1836734693877551,
20
+ "acc,exam_id__UNICAMP_2019": 0.14,
21
+ "acc,exam_id__USP_2020": 0.17857142857142858,
22
+ "acc,exam_id__UNICAMP_2021_1": 0.2608695652173913,
23
+ "acc,exam_id__USP_2019": 0.225,
24
+ "acc,exam_id__UNICAMP_2022": 0.23076923076923078,
25
+ "acc,exam_id__UNICAMP_2024": 0.15555555555555556,
26
+ "acc,exam_id__UNICAMP_2021_2": 0.11764705882352941,
27
+ "acc,exam_id__UNICAMP_2018": 0.2222222222222222,
28
+ "acc,exam_id__USP_2023": 0.09090909090909091,
29
+ "acc,exam_id__UNICAMP_2020": 0.16363636363636364,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.19104268719384185,
35
+ "acc,exam_id__2013": 0.16666666666666666,
36
+ "acc,exam_id__2012": 0.19827586206896552,
37
+ "acc,exam_id__2015": 0.14285714285714285,
38
+ "acc,exam_id__2016": 0.18181818181818182,
39
+ "acc,exam_id__2009": 0.16521739130434782,
40
+ "acc,exam_id__2023": 0.25925925925925924,
41
+ "acc,exam_id__2016_2": 0.1951219512195122,
42
+ "acc,exam_id__2010": 0.1623931623931624,
43
+ "acc,exam_id__2014": 0.1834862385321101,
44
+ "acc,exam_id__2022": 0.21804511278195488,
45
+ "acc,exam_id__2011": 0.19658119658119658,
46
+ "acc,exam_id__2017": 0.20689655172413793
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.4396551724137931,
50
+ "acc,all": 0.7846153846153846,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.259929067077872,
56
+ "acc,all": 0.23142857142857143
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.21822323462414578,
60
+ "acc,exam_id__2011-03": 0.23232323232323232,
61
+ "acc,exam_id__2011-04": 0.25,
62
+ "acc,exam_id__2011-05": 0.2375,
63
+ "acc,exam_id__2016-19": 0.1794871794871795,
64
+ "acc,exam_id__2017-23": 0.2,
65
+ "acc,exam_id__2018-25": 0.25,
66
+ "acc,exam_id__2012-09": 0.23376623376623376,
67
+ "acc,exam_id__2017-24": 0.2125,
68
+ "acc,exam_id__2014-14": 0.2625,
69
+ "acc,exam_id__2015-17": 0.23076923076923078,
70
+ "acc,exam_id__2012-07": 0.1,
71
+ "acc,exam_id__2016-20": 0.2,
72
+ "acc,exam_id__2013-11": 0.1625,
73
+ "acc,exam_id__2016-21": 0.2125,
74
+ "acc,exam_id__2012-06a": 0.2125,
75
+ "acc,exam_id__2015-18": 0.25,
76
+ "acc,exam_id__2012-08": 0.2125,
77
+ "acc,exam_id__2013-12": 0.175,
78
+ "acc,exam_id__2012-06": 0.225,
79
+ "acc,exam_id__2015-16": 0.225,
80
+ "acc,exam_id__2013-10": 0.2,
81
+ "acc,exam_id__2014-13": 0.225,
82
+ "acc,exam_id__2010-02": 0.21,
83
+ "acc,exam_id__2014-15": 0.20512820512820512,
84
+ "acc,exam_id__2016-20a": 0.2875,
85
+ "acc,exam_id__2010-01": 0.25882352941176473,
86
+ "acc,exam_id__2017-22": 0.2375,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.4038189533239038,
92
+ "acc,all": 0.6709753231492362
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.011187176063075921,
96
+ "acc,all": 0.004975124378109453,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7fc263b9be20>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7fc263b9b7e0>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fc263b9ba60>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7fc263be4040>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fc263be42c0>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7fc263b9b1a0>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7fc263b9b420>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia-temp/tweetsentbr",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "id_sampler",
990
+ "sampler_config": {
991
+ "id_list": [
992
+ "862006098672459776",
993
+ "861612241703063552",
994
+ "861833257087848448",
995
+ "861283345476571138",
996
+ "861283000335695873",
997
+ "862139461274152962",
998
+ "862139468702265344",
999
+ "862006107702734848",
1000
+ "862004354458537984",
1001
+ "861833322925883392",
1002
+ "861603063190171648",
1003
+ "862139462716989440",
1004
+ "862005877355810818",
1005
+ "861751885862244353",
1006
+ "862045180261695489",
1007
+ "862004252499226630",
1008
+ "862023970828292097",
1009
+ "862041752127107074",
1010
+ "862034961863503872",
1011
+ "861293756548608001",
1012
+ "861993527575695360",
1013
+ "862003099355021315",
1014
+ "862002404086206467",
1015
+ "861282989602463744",
1016
+ "862139454399668229",
1017
+ "862139463769743361",
1018
+ "862054906689138688",
1019
+ "862139446535360513",
1020
+ "861997363744911361",
1021
+ "862057988898648065",
1022
+ "861329080083521536",
1023
+ "861286289034838016",
1024
+ "861833050526806017",
1025
+ "861300658565255169",
1026
+ "861989003821813760",
1027
+ "861682750398631938",
1028
+ "861283275716907008",
1029
+ "861283402523267072",
1030
+ "861873108147466240",
1031
+ "862139462138171392",
1032
+ "861284090271715333",
1033
+ "862139446149427201",
1034
+ "861629109331525633",
1035
+ "861721698609098753",
1036
+ "862139453124612096",
1037
+ "861283339482914816",
1038
+ "861282466291748867",
1039
+ "862055346759749632",
1040
+ "862003019860389891",
1041
+ "862140698346344449",
1042
+ "862084376280092672",
1043
+ "862003058708017152",
1044
+ "862000677345787904",
1045
+ "862029129310502913",
1046
+ "862005822376882178",
1047
+ "861969836297134085",
1048
+ "861302955361927168",
1049
+ "862064949451005953",
1050
+ "861282589541355520",
1051
+ "862005476858486784",
1052
+ "862004684411850757",
1053
+ "862139471101349890",
1054
+ "862139467146170368",
1055
+ "862139475098558465",
1056
+ "862140706550403072",
1057
+ "861282777001537536",
1058
+ "862003184147079169",
1059
+ "861283410656059394",
1060
+ "861283417857691649",
1061
+ "861888778922856448",
1062
+ "861655860812099585",
1063
+ "861834248063504384",
1064
+ "862005210935382017",
1065
+ "861282716930760704",
1066
+ "861287082433622022"
1067
+ ],
1068
+ "id_column": "id"
1069
+ }
1070
+ },
1071
+ "num_fewshot": 25,
1072
+ "metric_list": [
1073
+ {
1074
+ "metric": "f1_macro",
1075
+ "aggregation": "f1_macro",
1076
+ "higher_is_better": true
1077
+ },
1078
+ {
1079
+ "metric": "acc",
1080
+ "aggregation": "acc",
1081
+ "higher_is_better": true
1082
+ }
1083
+ ],
1084
+ "output_type": "generate_until",
1085
+ "generation_kwargs": {
1086
+ "max_gen_toks": 32,
1087
+ "do_sample": false,
1088
+ "temperature": 0.0,
1089
+ "top_k": null,
1090
+ "top_p": null,
1091
+ "until": [
1092
+ "\n\n"
1093
+ ]
1094
  },
1095
+ "repeats": 1,
1096
+ "filter_list": [
1097
+ {
1098
+ "name": "all",
1099
+ "filter": [
1100
+ {
1101
+ "function": "find_similar_label",
1102
+ "labels": [
1103
+ "Positivo",
1104
+ "Neutro",
1105
+ "Negativo"
1106
+ ]
1107
+ },
1108
+ {
1109
+ "function": "take_first"
1110
+ }
1111
+ ]
1112
+ }
1113
+ ],
1114
+ "should_decontaminate": false,
1115
+ "metadata": {
1116
+ "version": 1.0
1117
  }
 
1118
  }
1119
  },
1120
+ "versions": {
1121
+ "assin2_rte": 1.1,
1122
+ "assin2_sts": 1.1,
1123
+ "bluex": 1.1,
1124
+ "enem_challenge": 1.1,
1125
+ "faquad_nli": 1.1,
1126
+ "hatebr_offensive": 1.0,
1127
+ "oab_exams": 1.5,
1128
+ "portuguese_hate_speech": 1.0,
1129
+ "tweetsentbr": 1.0
1130
  },
1131
+ "n-shot": {
1132
+ "assin2_rte": 15,
1133
+ "assin2_sts": 15,
1134
+ "bluex": 3,
1135
+ "enem_challenge": 3,
1136
+ "faquad_nli": 15,
1137
+ "hatebr_offensive": 25,
1138
+ "oab_exams": 3,
1139
+ "portuguese_hate_speech": 25,
1140
+ "tweetsentbr": 25
1141
  },
1142
+ "model_meta": {
1143
+ "truncated": 42,
1144
+ "non_truncated": 14108,
1145
+ "padded": 0,
1146
+ "non_padded": 14150,
1147
+ "fewshots_truncated": 56,
1148
+ "has_chat_template": false,
1149
+ "chat_type": null,
1150
+ "n_gpus": 1,
1151
+ "accelerate_num_process": null,
1152
+ "model_sha": "f33025648652797a390d8c54835273845b437161",
1153
+ "model_dtype": "torch.float16",
1154
+ "model_memory_footprint": 54087788,
1155
+ "model_num_parameters": 14067712,
1156
+ "model_is_loaded_in_4bit": null,
1157
+ "model_is_loaded_in_8bit": null,
1158
+ "model_is_quantized": null,
1159
+ "model_device": "cuda:0",
1160
+ "batch_size": 32,
1161
+ "max_length": 2048,
1162
+ "max_ctx_length": 2016,
1163
+ "max_gen_toks": 32
1164
  },
1165
+ "task_model_meta": {
1166
+ "assin2_rte": {
1167
+ "sample_size": 2448,
1168
+ "truncated": 0,
1169
+ "non_truncated": 2448,
1170
+ "padded": 0,
1171
+ "non_padded": 2448,
1172
+ "fewshots_truncated": 0,
1173
+ "mean_seq_length": 1378.0061274509803,
1174
+ "min_seq_length": 1355,
1175
+ "max_seq_length": 1444,
1176
+ "max_ctx_length": 2016,
1177
+ "max_gen_toks": 32,
1178
+ "mean_original_fewshots_size": 15.0,
1179
+ "mean_effective_fewshot_size": 15.0
1180
+ },
1181
+ "assin2_sts": {
1182
+ "sample_size": 2448,
1183
+ "truncated": 0,
1184
+ "non_truncated": 2448,
1185
+ "padded": 0,
1186
+ "non_padded": 2448,
1187
+ "fewshots_truncated": 0,
1188
+ "mean_seq_length": 1514.0061274509803,
1189
+ "min_seq_length": 1491,
1190
+ "max_seq_length": 1580,
1191
+ "max_ctx_length": 2016,
1192
+ "max_gen_toks": 32,
1193
+ "mean_original_fewshots_size": 15.0,
1194
+ "mean_effective_fewshot_size": 15.0
1195
+ },
1196
+ "bluex": {
1197
+ "sample_size": 719,
1198
+ "truncated": 26,
1199
+ "non_truncated": 693,
1200
+ "padded": 0,
1201
+ "non_padded": 719,
1202
+ "fewshots_truncated": 39,
1203
+ "mean_seq_length": 1656.076495132128,
1204
+ "min_seq_length": 1285,
1205
+ "max_seq_length": 2440,
1206
+ "max_ctx_length": 2016,
1207
+ "max_gen_toks": 32,
1208
+ "mean_original_fewshots_size": 3.0,
1209
+ "mean_effective_fewshot_size": 2.945757997218359
1210
+ },
1211
+ "enem_challenge": {
1212
+ "sample_size": 1429,
1213
+ "truncated": 16,
1214
+ "non_truncated": 1413,
1215
+ "padded": 0,
1216
+ "non_padded": 1429,
1217
+ "fewshots_truncated": 17,
1218
+ "mean_seq_length": 1559.0517844646606,
1219
+ "min_seq_length": 1308,
1220
+ "max_seq_length": 2520,
1221
+ "max_ctx_length": 2016,
1222
+ "max_gen_toks": 32,
1223
+ "mean_original_fewshots_size": 3.0,
1224
+ "mean_effective_fewshot_size": 2.988103568929321
1225
+ },
1226
+ "faquad_nli": {
1227
+ "sample_size": 650,
1228
+ "truncated": 0,
1229
+ "non_truncated": 650,
1230
+ "padded": 0,
1231
+ "non_padded": 650,
1232
+ "fewshots_truncated": 0,
1233
+ "mean_seq_length": 1578.8153846153846,
1234
+ "min_seq_length": 1525,
1235
+ "max_seq_length": 1688,
1236
+ "max_ctx_length": 2016,
1237
+ "max_gen_toks": 32,
1238
+ "mean_original_fewshots_size": 15.0,
1239
+ "mean_effective_fewshot_size": 15.0
1240
+ },
1241
+ "hatebr_offensive": {
1242
+ "sample_size": 1400,
1243
+ "truncated": 0,
1244
+ "non_truncated": 1400,
1245
+ "padded": 0,
1246
+ "non_padded": 1400,
1247
+ "fewshots_truncated": 0,
1248
+ "mean_seq_length": 1292.5114285714285,
1249
+ "min_seq_length": 1269,
1250
+ "max_seq_length": 1535,
1251
+ "max_ctx_length": 2016,
1252
+ "max_gen_toks": 32,
1253
+ "mean_original_fewshots_size": 25.0,
1254
+ "mean_effective_fewshot_size": 25.0
1255
+ },
1256
+ "oab_exams": {
1257
+ "sample_size": 2195,
1258
+ "truncated": 0,
1259
+ "non_truncated": 2195,
1260
+ "padded": 0,
1261
+ "non_padded": 2195,
1262
+ "fewshots_truncated": 0,
1263
+ "mean_seq_length": 1340.5503416856493,
1264
+ "min_seq_length": 1077,
1265
+ "max_seq_length": 1805,
1266
+ "max_ctx_length": 2016,
1267
+ "max_gen_toks": 32,
1268
+ "mean_original_fewshots_size": 3.0,
1269
+ "mean_effective_fewshot_size": 3.0
1270
+ },
1271
+ "portuguese_hate_speech": {
1272
+ "sample_size": 851,
1273
+ "truncated": 0,
1274
+ "non_truncated": 851,
1275
+ "padded": 0,
1276
+ "non_padded": 851,
1277
+ "fewshots_truncated": 0,
1278
+ "mean_seq_length": 1775.5558166862515,
1279
+ "min_seq_length": 1741,
1280
+ "max_seq_length": 1812,
1281
+ "max_ctx_length": 2016,
1282
+ "max_gen_toks": 32,
1283
+ "mean_original_fewshots_size": 25.0,
1284
+ "mean_effective_fewshot_size": 25.0
1285
+ },
1286
+ "tweetsentbr": {
1287
+ "sample_size": 2010,
1288
+ "truncated": 0,
1289
+ "non_truncated": 2010,
1290
+ "padded": 0,
1291
+ "non_padded": 2010,
1292
+ "fewshots_truncated": 0,
1293
+ "mean_seq_length": 1495.2800995024875,
1294
+ "min_seq_length": 1475,
1295
+ "max_seq_length": 1554,
1296
+ "max_ctx_length": 2016,
1297
+ "max_gen_toks": 32,
1298
+ "mean_original_fewshots_size": 25.0,
1299
+ "mean_effective_fewshot_size": 25.0
1300
+ }
1301
  },
1302
+ "config": {
1303
+ "model": "huggingface",
1304
+ "model_args": "pretrained=EleutherAI/pythia-14m,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1305
+ "batch_size": "auto",
1306
+ "batch_sizes": [],
1307
+ "device": null,
1308
+ "use_cache": null,
1309
+ "limit": [
1310
+ null,
1311
+ null,
1312
+ null,
1313
+ null,
1314
+ null,
1315
+ null,
1316
+ null,
1317
+ null,
1318
+ null
1319
+ ],
1320
+ "bootstrap_iters": 0,
1321
+ "gen_kwargs": null
1322
  },
1323
+ "git_hash": null
1324
  }
EleutherAI/pythia-14m/results_2024-04-03T19-47-56.339960.json CHANGED
@@ -34,29 +34,29 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.1889958293891971,
38
- "all_grouped_npm": -0.24792686543396047,
39
  "all_grouped": {
40
  "enem_challenge": 0.19104268719384185,
41
  "bluex": 0.17941585535465926,
42
  "oab_exams": 0.21822323462414578,
43
- "assin2_rte": 0.2210516588115701,
44
  "assin2_sts": 0.0006847937896062521,
45
  "faquad_nli": 0.4396551724137931,
46
- "hatebr_offensive": 0.17328604471858133,
47
- "portuguese_hate_speech": 0.2692126355492692,
48
- "tweetsentbr": 0.008390382047306943
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.19104268719384185,
52
  "harness|bluex|bluex|None|3": 0.17941585535465926,
53
  "harness|oab_exams|oab_exams|None|3": 0.21822323462414578,
54
- "harness|assin2_rte|assin2_rte|None|15": 0.2210516588115701,
55
  "harness|assin2_sts|assin2_sts|None|15": 0.0006847937896062521,
56
  "harness|faquad_nli|faquad_nli|None|15": 0.4396551724137931,
57
- "harness|hatebr_offensive|hatebr_offensive|None|25": 0.17328604471858133,
58
- "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.2692126355492692,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.008390382047306943
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.19104268719384185,
@@ -125,9 +125,9 @@
125
  "main_score": 0.21822323462414578
126
  },
127
  "harness|assin2_rte|assin2_rte|None|15": {
128
- "f1_macro,all": 0.2210516588115701,
129
  "acc,all": 0.48856209150326796,
130
- "main_score": 0.2210516588115701
131
  },
132
  "harness|assin2_sts|assin2_sts|None|15": {
133
  "pearson,all": 0.0006847937896062521,
@@ -140,19 +140,19 @@
140
  "main_score": 0.4396551724137931
141
  },
142
  "harness|hatebr_offensive|hatebr_offensive|None|25": {
143
- "f1_macro,all": 0.17328604471858133,
144
  "acc,all": 0.23142857142857143,
145
- "main_score": 0.17328604471858133
146
  },
147
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": {
148
- "f1_macro,all": 0.2692126355492692,
149
  "acc,all": 0.6709753231492362,
150
- "main_score": 0.2692126355492692
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.008390382047306943,
154
  "acc,all": 0.004975124378109453,
155
- "main_score": 0.008390382047306943
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.22617049200647255,
38
+ "all_grouped_npm": -0.1749423038952026,
39
  "all_grouped": {
40
  "enem_challenge": 0.19104268719384185,
41
  "bluex": 0.17941585535465926,
42
  "oab_exams": 0.21822323462414578,
43
+ "assin2_rte": 0.33157748821735517,
44
  "assin2_sts": 0.0006847937896062521,
45
  "faquad_nli": 0.4396551724137931,
46
+ "hatebr_offensive": 0.259929067077872,
47
+ "portuguese_hate_speech": 0.4038189533239038,
48
+ "tweetsentbr": 0.011187176063075921
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.19104268719384185,
52
  "harness|bluex|bluex|None|3": 0.17941585535465926,
53
  "harness|oab_exams|oab_exams|None|3": 0.21822323462414578,
54
+ "harness|assin2_rte|assin2_rte|None|15": 0.33157748821735517,
55
  "harness|assin2_sts|assin2_sts|None|15": 0.0006847937896062521,
56
  "harness|faquad_nli|faquad_nli|None|15": 0.4396551724137931,
57
+ "harness|hatebr_offensive|hatebr_offensive|None|25": 0.259929067077872,
58
+ "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.4038189533239038,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.011187176063075921
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.19104268719384185,
 
125
  "main_score": 0.21822323462414578
126
  },
127
  "harness|assin2_rte|assin2_rte|None|15": {
128
+ "f1_macro,all": 0.33157748821735517,
129
  "acc,all": 0.48856209150326796,
130
+ "main_score": 0.33157748821735517
131
  },
132
  "harness|assin2_sts|assin2_sts|None|15": {
133
  "pearson,all": 0.0006847937896062521,
 
140
  "main_score": 0.4396551724137931
141
  },
142
  "harness|hatebr_offensive|hatebr_offensive|None|25": {
143
+ "f1_macro,all": 0.259929067077872,
144
  "acc,all": 0.23142857142857143,
145
+ "main_score": 0.259929067077872
146
  },
147
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": {
148
+ "f1_macro,all": 0.4038189533239038,
149
  "acc,all": 0.6709753231492362,
150
+ "main_score": 0.4038189533239038
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.011187176063075921,
154
  "acc,all": 0.004975124378109453,
155
+ "main_score": 0.011187176063075921
156
  }
157
  },
158
  "config_tasks": {
EleutherAI/pythia-70m-deduped/raw_2024-04-03T21-10-06.848681/results.json CHANGED
@@ -1,1324 +1,1324 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.23382263963539596,
5
- "acc,all": 0.5053104575163399,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.02026922309956098,
10
- "mse,all": 2.6797467320261448,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.1835883171070932,
15
- "acc,exam_id__UNICAMP_2022": 0.23076923076923078,
16
- "acc,exam_id__USP_2018": 0.12962962962962962,
17
- "acc,exam_id__UNICAMP_2024": 0.17777777777777778,
18
- "acc,exam_id__USP_2021": 0.17307692307692307,
19
- "acc,exam_id__USP_2019": 0.25,
20
- "acc,exam_id__UNICAMP_2019": 0.14,
21
- "acc,exam_id__UNICAMP_2021_1": 0.30434782608695654,
22
- "acc,exam_id__UNICAMP_2020": 0.16363636363636364,
23
- "acc,exam_id__UNICAMP_2021_2": 0.13725490196078433,
24
- "acc,exam_id__USP_2022": 0.1836734693877551,
25
- "acc,exam_id__UNICAMP_2018": 0.2037037037037037,
26
- "acc,exam_id__UNICAMP_2023": 0.3023255813953488,
27
- "acc,exam_id__USP_2023": 0.06818181818181818,
28
- "acc,exam_id__USP_2024": 0.12195121951219512,
29
- "acc,exam_id__USP_2020": 0.19642857142857142,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.172148355493352,
35
- "acc,exam_id__2012": 0.16379310344827586,
36
- "acc,exam_id__2011": 0.17094017094017094,
37
- "acc,exam_id__2016_2": 0.14634146341463414,
38
- "acc,exam_id__2017": 0.1896551724137931,
39
- "acc,exam_id__2009": 0.17391304347826086,
40
- "acc,exam_id__2015": 0.11764705882352941,
41
- "acc,exam_id__2014": 0.1743119266055046,
42
- "acc,exam_id__2022": 0.18796992481203006,
43
- "acc,exam_id__2010": 0.15384615384615385,
44
- "acc,exam_id__2016": 0.17355371900826447,
45
- "acc,exam_id__2013": 0.12037037037037036,
46
- "acc,exam_id__2023": 0.2740740740740741
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.2759039805530234,
50
- "acc,all": 0.6984615384615385,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.28076386043861,
56
- "acc,all": 0.48857142857142855
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.2041002277904328,
60
- "acc,exam_id__2012-08": 0.2,
61
- "acc,exam_id__2011-04": 0.225,
62
- "acc,exam_id__2016-20": 0.1875,
63
- "acc,exam_id__2012-09": 0.2077922077922078,
64
- "acc,exam_id__2016-20a": 0.25,
65
- "acc,exam_id__2017-24": 0.2,
66
- "acc,exam_id__2013-12": 0.15,
67
- "acc,exam_id__2012-06a": 0.2125,
68
- "acc,exam_id__2017-22": 0.225,
69
- "acc,exam_id__2014-13": 0.2,
70
- "acc,exam_id__2013-11": 0.1375,
71
- "acc,exam_id__2015-18": 0.225,
72
- "acc,exam_id__2011-03": 0.1717171717171717,
73
- "acc,exam_id__2012-06": 0.2375,
74
- "acc,exam_id__2011-05": 0.1875,
75
- "acc,exam_id__2010-02": 0.21,
76
- "acc,exam_id__2014-15": 0.19230769230769232,
77
- "acc,exam_id__2015-16": 0.2,
78
- "acc,exam_id__2014-14": 0.2625,
79
- "acc,exam_id__2016-19": 0.1794871794871795,
80
- "acc,exam_id__2017-23": 0.1875,
81
- "acc,exam_id__2015-17": 0.24358974358974358,
82
- "acc,exam_id__2016-21": 0.2125,
83
- "acc,exam_id__2012-07": 0.1375,
84
- "acc,exam_id__2013-10": 0.1875,
85
- "acc,exam_id__2010-01": 0.2235294117647059,
86
- "acc,exam_id__2018-25": 0.2625,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.24182579976211263,
92
- "acc,all": 0.36662749706227965
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.13108766233766234,
96
- "acc,all": 0.29502487562189056,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
208
  },
209
- {
210
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7f92c3f736a0>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7f92c3f73060>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
 
 
414
  },
415
- {
416
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f92c3f732e0>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7f92c3f73880>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
 
 
503
  },
504
- {
505
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f92c3f73b00>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
624
  },
625
- {
626
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7f92c3f72a20>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
 
 
 
839
  },
840
- {
841
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7f92c3f72ca0>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
963
  },
964
- {
965
- "function": "take_first"
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia-temp/tweetsentbr",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "id_sampler",
990
- "sampler_config": {
991
- "id_list": [
992
- "862006098672459776",
993
- "861612241703063552",
994
- "861833257087848448",
995
- "861283345476571138",
996
- "861283000335695873",
997
- "862139461274152962",
998
- "862139468702265344",
999
- "862006107702734848",
1000
- "862004354458537984",
1001
- "861833322925883392",
1002
- "861603063190171648",
1003
- "862139462716989440",
1004
- "862005877355810818",
1005
- "861751885862244353",
1006
- "862045180261695489",
1007
- "862004252499226630",
1008
- "862023970828292097",
1009
- "862041752127107074",
1010
- "862034961863503872",
1011
- "861293756548608001",
1012
- "861993527575695360",
1013
- "862003099355021315",
1014
- "862002404086206467",
1015
- "861282989602463744",
1016
- "862139454399668229",
1017
- "862139463769743361",
1018
- "862054906689138688",
1019
- "862139446535360513",
1020
- "861997363744911361",
1021
- "862057988898648065",
1022
- "861329080083521536",
1023
- "861286289034838016",
1024
- "861833050526806017",
1025
- "861300658565255169",
1026
- "861989003821813760",
1027
- "861682750398631938",
1028
- "861283275716907008",
1029
- "861283402523267072",
1030
- "861873108147466240",
1031
- "862139462138171392",
1032
- "861284090271715333",
1033
- "862139446149427201",
1034
- "861629109331525633",
1035
- "861721698609098753",
1036
- "862139453124612096",
1037
- "861283339482914816",
1038
- "861282466291748867",
1039
- "862055346759749632",
1040
- "862003019860389891",
1041
- "862140698346344449",
1042
- "862084376280092672",
1043
- "862003058708017152",
1044
- "862000677345787904",
1045
- "862029129310502913",
1046
- "862005822376882178",
1047
- "861969836297134085",
1048
- "861302955361927168",
1049
- "862064949451005953",
1050
- "861282589541355520",
1051
- "862005476858486784",
1052
- "862004684411850757",
1053
- "862139471101349890",
1054
- "862139467146170368",
1055
- "862139475098558465",
1056
- "862140706550403072",
1057
- "861282777001537536",
1058
- "862003184147079169",
1059
- "861283410656059394",
1060
- "861283417857691649",
1061
- "861888778922856448",
1062
- "861655860812099585",
1063
- "861834248063504384",
1064
- "862005210935382017",
1065
- "861282716930760704",
1066
- "861287082433622022"
1067
- ],
1068
- "id_column": "id"
1069
- }
1070
- },
1071
- "num_fewshot": 25,
1072
- "metric_list": [
1073
- {
1074
- "metric": "f1_macro",
1075
- "aggregation": "f1_macro",
1076
- "higher_is_better": true
1077
  },
1078
- {
1079
- "metric": "acc",
1080
- "aggregation": "acc",
1081
- "higher_is_better": true
1082
- }
1083
- ],
1084
- "output_type": "generate_until",
1085
- "generation_kwargs": {
1086
- "max_gen_toks": 32,
1087
- "do_sample": false,
1088
- "temperature": 0.0,
1089
- "top_k": null,
1090
- "top_p": null,
1091
- "until": [
1092
- "\n\n"
1093
- ]
1094
- },
1095
- "repeats": 1,
1096
- "filter_list": [
1097
- {
1098
- "name": "all",
1099
- "filter": [
1100
- {
1101
- "function": "find_similar_label",
1102
- "labels": [
1103
- "Positivo",
1104
- "Neutro",
1105
- "Negativo"
1106
- ]
1107
  },
1108
- {
1109
- "function": "take_first"
1110
  }
1111
- ]
1112
  }
1113
- ],
1114
- "should_decontaminate": false,
1115
- "metadata": {
1116
- "version": 1.0
1117
- }
1118
- }
1119
- },
1120
- "versions": {
1121
- "assin2_rte": 1.1,
1122
- "assin2_sts": 1.1,
1123
- "bluex": 1.1,
1124
- "enem_challenge": 1.1,
1125
- "faquad_nli": 1.1,
1126
- "hatebr_offensive": 1.0,
1127
- "oab_exams": 1.5,
1128
- "portuguese_hate_speech": 1.0,
1129
- "tweetsentbr": 1.0
1130
- },
1131
- "n-shot": {
1132
- "assin2_rte": 15,
1133
- "assin2_sts": 15,
1134
- "bluex": 3,
1135
- "enem_challenge": 3,
1136
- "faquad_nli": 15,
1137
- "hatebr_offensive": 25,
1138
- "oab_exams": 3,
1139
- "portuguese_hate_speech": 25,
1140
- "tweetsentbr": 25
1141
- },
1142
- "model_meta": {
1143
- "truncated": 42,
1144
- "non_truncated": 14108,
1145
- "padded": 0,
1146
- "non_padded": 14150,
1147
- "fewshots_truncated": 56,
1148
- "has_chat_template": false,
1149
- "chat_type": null,
1150
- "n_gpus": 1,
1151
- "accelerate_num_process": null,
1152
- "model_sha": "e93a9faa9c77e5d09219f6c868bfc7a1bd65593c",
1153
- "model_dtype": "torch.float16",
1154
- "model_memory_footprint": 167592140,
1155
- "model_num_parameters": 70426624,
1156
- "model_is_loaded_in_4bit": null,
1157
- "model_is_loaded_in_8bit": null,
1158
- "model_is_quantized": null,
1159
- "model_device": "cuda:0",
1160
- "batch_size": 64,
1161
- "max_length": 2048,
1162
- "max_ctx_length": 2016,
1163
- "max_gen_toks": 32
1164
- },
1165
- "task_model_meta": {
1166
- "assin2_rte": {
1167
- "sample_size": 2448,
1168
- "truncated": 0,
1169
- "non_truncated": 2448,
1170
- "padded": 0,
1171
- "non_padded": 2448,
1172
- "fewshots_truncated": 0,
1173
- "mean_seq_length": 1378.0061274509803,
1174
- "min_seq_length": 1355,
1175
- "max_seq_length": 1444,
1176
- "max_ctx_length": 2016,
1177
- "max_gen_toks": 32,
1178
- "mean_original_fewshots_size": 15.0,
1179
- "mean_effective_fewshot_size": 15.0
1180
- },
1181
- "assin2_sts": {
1182
- "sample_size": 2448,
1183
- "truncated": 0,
1184
- "non_truncated": 2448,
1185
- "padded": 0,
1186
- "non_padded": 2448,
1187
- "fewshots_truncated": 0,
1188
- "mean_seq_length": 1514.0061274509803,
1189
- "min_seq_length": 1491,
1190
- "max_seq_length": 1580,
1191
- "max_ctx_length": 2016,
1192
- "max_gen_toks": 32,
1193
- "mean_original_fewshots_size": 15.0,
1194
- "mean_effective_fewshot_size": 15.0
1195
- },
1196
- "bluex": {
1197
- "sample_size": 719,
1198
- "truncated": 26,
1199
- "non_truncated": 693,
1200
- "padded": 0,
1201
- "non_padded": 719,
1202
- "fewshots_truncated": 39,
1203
- "mean_seq_length": 1656.076495132128,
1204
- "min_seq_length": 1285,
1205
- "max_seq_length": 2440,
1206
- "max_ctx_length": 2016,
1207
- "max_gen_toks": 32,
1208
- "mean_original_fewshots_size": 3.0,
1209
- "mean_effective_fewshot_size": 2.945757997218359
1210
  },
1211
- "enem_challenge": {
1212
- "sample_size": 1429,
1213
- "truncated": 16,
1214
- "non_truncated": 1413,
1215
- "padded": 0,
1216
- "non_padded": 1429,
1217
- "fewshots_truncated": 17,
1218
- "mean_seq_length": 1559.0517844646606,
1219
- "min_seq_length": 1308,
1220
- "max_seq_length": 2520,
1221
- "max_ctx_length": 2016,
1222
- "max_gen_toks": 32,
1223
- "mean_original_fewshots_size": 3.0,
1224
- "mean_effective_fewshot_size": 2.988103568929321
1225
  },
1226
- "faquad_nli": {
1227
- "sample_size": 650,
1228
- "truncated": 0,
1229
- "non_truncated": 650,
1230
- "padded": 0,
1231
- "non_padded": 650,
1232
- "fewshots_truncated": 0,
1233
- "mean_seq_length": 1578.8153846153846,
1234
- "min_seq_length": 1525,
1235
- "max_seq_length": 1688,
1236
- "max_ctx_length": 2016,
1237
- "max_gen_toks": 32,
1238
- "mean_original_fewshots_size": 15.0,
1239
- "mean_effective_fewshot_size": 15.0
1240
  },
1241
- "hatebr_offensive": {
1242
- "sample_size": 1400,
1243
- "truncated": 0,
1244
- "non_truncated": 1400,
1245
- "padded": 0,
1246
- "non_padded": 1400,
1247
- "fewshots_truncated": 0,
1248
- "mean_seq_length": 1292.5114285714285,
1249
- "min_seq_length": 1269,
1250
- "max_seq_length": 1535,
1251
- "max_ctx_length": 2016,
1252
- "max_gen_toks": 32,
1253
- "mean_original_fewshots_size": 25.0,
1254
- "mean_effective_fewshot_size": 25.0
1255
  },
1256
- "oab_exams": {
1257
- "sample_size": 2195,
1258
- "truncated": 0,
1259
- "non_truncated": 2195,
1260
- "padded": 0,
1261
- "non_padded": 2195,
1262
- "fewshots_truncated": 0,
1263
- "mean_seq_length": 1340.5503416856493,
1264
- "min_seq_length": 1077,
1265
- "max_seq_length": 1805,
1266
- "max_ctx_length": 2016,
1267
- "max_gen_toks": 32,
1268
- "mean_original_fewshots_size": 3.0,
1269
- "mean_effective_fewshot_size": 3.0
1270
  },
1271
- "portuguese_hate_speech": {
1272
- "sample_size": 851,
1273
- "truncated": 0,
1274
- "non_truncated": 851,
1275
- "padded": 0,
1276
- "non_padded": 851,
1277
- "fewshots_truncated": 0,
1278
- "mean_seq_length": 1775.5558166862515,
1279
- "min_seq_length": 1741,
1280
- "max_seq_length": 1812,
1281
- "max_ctx_length": 2016,
1282
- "max_gen_toks": 32,
1283
- "mean_original_fewshots_size": 25.0,
1284
- "mean_effective_fewshot_size": 25.0
1285
  },
1286
- "tweetsentbr": {
1287
- "sample_size": 2010,
1288
- "truncated": 0,
1289
- "non_truncated": 2010,
1290
- "padded": 0,
1291
- "non_padded": 2010,
1292
- "fewshots_truncated": 0,
1293
- "mean_seq_length": 1495.2800995024875,
1294
- "min_seq_length": 1475,
1295
- "max_seq_length": 1554,
1296
- "max_ctx_length": 2016,
1297
- "max_gen_toks": 32,
1298
- "mean_original_fewshots_size": 25.0,
1299
- "mean_effective_fewshot_size": 25.0
1300
- }
1301
- },
1302
- "config": {
1303
- "model": "huggingface",
1304
- "model_args": "pretrained=EleutherAI/pythia-70m-deduped,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1305
- "batch_size": "auto",
1306
- "batch_sizes": [],
1307
- "device": null,
1308
- "use_cache": null,
1309
- "limit": [
1310
- null,
1311
- null,
1312
- null,
1313
- null,
1314
- null,
1315
- null,
1316
- null,
1317
- null,
1318
- null
1319
- ],
1320
- "bootstrap_iters": 0,
1321
- "gen_kwargs": null
1322
- },
1323
- "git_hash": null
1324
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.35073395945309394,
5
+ "acc,all": 0.5053104575163399,
6
+ "alias": "assin2_rte"
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.02026922309956098,
10
+ "mse,all": 2.6797467320261448,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.1835883171070932,
15
+ "acc,exam_id__UNICAMP_2022": 0.23076923076923078,
16
+ "acc,exam_id__USP_2018": 0.12962962962962962,
17
+ "acc,exam_id__UNICAMP_2024": 0.17777777777777778,
18
+ "acc,exam_id__USP_2021": 0.17307692307692307,
19
+ "acc,exam_id__USP_2019": 0.25,
20
+ "acc,exam_id__UNICAMP_2019": 0.14,
21
+ "acc,exam_id__UNICAMP_2021_1": 0.30434782608695654,
22
+ "acc,exam_id__UNICAMP_2020": 0.16363636363636364,
23
+ "acc,exam_id__UNICAMP_2021_2": 0.13725490196078433,
24
+ "acc,exam_id__USP_2022": 0.1836734693877551,
25
+ "acc,exam_id__UNICAMP_2018": 0.2037037037037037,
26
+ "acc,exam_id__UNICAMP_2023": 0.3023255813953488,
27
+ "acc,exam_id__USP_2023": 0.06818181818181818,
28
+ "acc,exam_id__USP_2024": 0.12195121951219512,
29
+ "acc,exam_id__USP_2020": 0.19642857142857142,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.172148355493352,
35
+ "acc,exam_id__2012": 0.16379310344827586,
36
+ "acc,exam_id__2011": 0.17094017094017094,
37
+ "acc,exam_id__2016_2": 0.14634146341463414,
38
+ "acc,exam_id__2017": 0.1896551724137931,
39
+ "acc,exam_id__2009": 0.17391304347826086,
40
+ "acc,exam_id__2015": 0.11764705882352941,
41
+ "acc,exam_id__2014": 0.1743119266055046,
42
+ "acc,exam_id__2022": 0.18796992481203006,
43
+ "acc,exam_id__2010": 0.15384615384615385,
44
+ "acc,exam_id__2016": 0.17355371900826447,
45
+ "acc,exam_id__2013": 0.12037037037037036,
46
+ "acc,exam_id__2023": 0.2740740740740741
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.4138559708295351,
50
+ "acc,all": 0.6984615384615385,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.421145790657915,
56
+ "acc,all": 0.48857142857142855
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.2041002277904328,
60
+ "acc,exam_id__2012-08": 0.2,
61
+ "acc,exam_id__2011-04": 0.225,
62
+ "acc,exam_id__2016-20": 0.1875,
63
+ "acc,exam_id__2012-09": 0.2077922077922078,
64
+ "acc,exam_id__2016-20a": 0.25,
65
+ "acc,exam_id__2017-24": 0.2,
66
+ "acc,exam_id__2013-12": 0.15,
67
+ "acc,exam_id__2012-06a": 0.2125,
68
+ "acc,exam_id__2017-22": 0.225,
69
+ "acc,exam_id__2014-13": 0.2,
70
+ "acc,exam_id__2013-11": 0.1375,
71
+ "acc,exam_id__2015-18": 0.225,
72
+ "acc,exam_id__2011-03": 0.1717171717171717,
73
+ "acc,exam_id__2012-06": 0.2375,
74
+ "acc,exam_id__2011-05": 0.1875,
75
+ "acc,exam_id__2010-02": 0.21,
76
+ "acc,exam_id__2014-15": 0.19230769230769232,
77
+ "acc,exam_id__2015-16": 0.2,
78
+ "acc,exam_id__2014-14": 0.2625,
79
+ "acc,exam_id__2016-19": 0.1794871794871795,
80
+ "acc,exam_id__2017-23": 0.1875,
81
+ "acc,exam_id__2015-17": 0.24358974358974358,
82
+ "acc,exam_id__2016-21": 0.2125,
83
+ "acc,exam_id__2012-07": 0.1375,
84
+ "acc,exam_id__2013-10": 0.1875,
85
+ "acc,exam_id__2010-01": 0.2235294117647059,
86
+ "acc,exam_id__2018-25": 0.2625,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.3627386996431689,
92
+ "acc,all": 0.36662749706227965
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.1747835497835498,
96
+ "acc,all": 0.29502487562189056,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7f92c3f736a0>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7f92c3f73060>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f92c3f732e0>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7f92c3f73880>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f92c3f73b00>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7f92c3f72a20>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7f92c3f72ca0>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia-temp/tweetsentbr",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "id_sampler",
990
+ "sampler_config": {
991
+ "id_list": [
992
+ "862006098672459776",
993
+ "861612241703063552",
994
+ "861833257087848448",
995
+ "861283345476571138",
996
+ "861283000335695873",
997
+ "862139461274152962",
998
+ "862139468702265344",
999
+ "862006107702734848",
1000
+ "862004354458537984",
1001
+ "861833322925883392",
1002
+ "861603063190171648",
1003
+ "862139462716989440",
1004
+ "862005877355810818",
1005
+ "861751885862244353",
1006
+ "862045180261695489",
1007
+ "862004252499226630",
1008
+ "862023970828292097",
1009
+ "862041752127107074",
1010
+ "862034961863503872",
1011
+ "861293756548608001",
1012
+ "861993527575695360",
1013
+ "862003099355021315",
1014
+ "862002404086206467",
1015
+ "861282989602463744",
1016
+ "862139454399668229",
1017
+ "862139463769743361",
1018
+ "862054906689138688",
1019
+ "862139446535360513",
1020
+ "861997363744911361",
1021
+ "862057988898648065",
1022
+ "861329080083521536",
1023
+ "861286289034838016",
1024
+ "861833050526806017",
1025
+ "861300658565255169",
1026
+ "861989003821813760",
1027
+ "861682750398631938",
1028
+ "861283275716907008",
1029
+ "861283402523267072",
1030
+ "861873108147466240",
1031
+ "862139462138171392",
1032
+ "861284090271715333",
1033
+ "862139446149427201",
1034
+ "861629109331525633",
1035
+ "861721698609098753",
1036
+ "862139453124612096",
1037
+ "861283339482914816",
1038
+ "861282466291748867",
1039
+ "862055346759749632",
1040
+ "862003019860389891",
1041
+ "862140698346344449",
1042
+ "862084376280092672",
1043
+ "862003058708017152",
1044
+ "862000677345787904",
1045
+ "862029129310502913",
1046
+ "862005822376882178",
1047
+ "861969836297134085",
1048
+ "861302955361927168",
1049
+ "862064949451005953",
1050
+ "861282589541355520",
1051
+ "862005476858486784",
1052
+ "862004684411850757",
1053
+ "862139471101349890",
1054
+ "862139467146170368",
1055
+ "862139475098558465",
1056
+ "862140706550403072",
1057
+ "861282777001537536",
1058
+ "862003184147079169",
1059
+ "861283410656059394",
1060
+ "861283417857691649",
1061
+ "861888778922856448",
1062
+ "861655860812099585",
1063
+ "861834248063504384",
1064
+ "862005210935382017",
1065
+ "861282716930760704",
1066
+ "861287082433622022"
1067
+ ],
1068
+ "id_column": "id"
1069
+ }
1070
+ },
1071
+ "num_fewshot": 25,
1072
+ "metric_list": [
1073
+ {
1074
+ "metric": "f1_macro",
1075
+ "aggregation": "f1_macro",
1076
+ "higher_is_better": true
1077
+ },
1078
+ {
1079
+ "metric": "acc",
1080
+ "aggregation": "acc",
1081
+ "higher_is_better": true
1082
+ }
1083
+ ],
1084
+ "output_type": "generate_until",
1085
+ "generation_kwargs": {
1086
+ "max_gen_toks": 32,
1087
+ "do_sample": false,
1088
+ "temperature": 0.0,
1089
+ "top_k": null,
1090
+ "top_p": null,
1091
+ "until": [
1092
+ "\n\n"
1093
+ ]
1094
  },
1095
+ "repeats": 1,
1096
+ "filter_list": [
1097
+ {
1098
+ "name": "all",
1099
+ "filter": [
1100
+ {
1101
+ "function": "find_similar_label",
1102
+ "labels": [
1103
+ "Positivo",
1104
+ "Neutro",
1105
+ "Negativo"
1106
+ ]
1107
+ },
1108
+ {
1109
+ "function": "take_first"
1110
+ }
1111
+ ]
1112
+ }
1113
+ ],
1114
+ "should_decontaminate": false,
1115
+ "metadata": {
1116
+ "version": 1.0
1117
  }
 
1118
  }
1119
  },
1120
+ "versions": {
1121
+ "assin2_rte": 1.1,
1122
+ "assin2_sts": 1.1,
1123
+ "bluex": 1.1,
1124
+ "enem_challenge": 1.1,
1125
+ "faquad_nli": 1.1,
1126
+ "hatebr_offensive": 1.0,
1127
+ "oab_exams": 1.5,
1128
+ "portuguese_hate_speech": 1.0,
1129
+ "tweetsentbr": 1.0
1130
  },
1131
+ "n-shot": {
1132
+ "assin2_rte": 15,
1133
+ "assin2_sts": 15,
1134
+ "bluex": 3,
1135
+ "enem_challenge": 3,
1136
+ "faquad_nli": 15,
1137
+ "hatebr_offensive": 25,
1138
+ "oab_exams": 3,
1139
+ "portuguese_hate_speech": 25,
1140
+ "tweetsentbr": 25
1141
  },
1142
+ "model_meta": {
1143
+ "truncated": 42,
1144
+ "non_truncated": 14108,
1145
+ "padded": 0,
1146
+ "non_padded": 14150,
1147
+ "fewshots_truncated": 56,
1148
+ "has_chat_template": false,
1149
+ "chat_type": null,
1150
+ "n_gpus": 1,
1151
+ "accelerate_num_process": null,
1152
+ "model_sha": "e93a9faa9c77e5d09219f6c868bfc7a1bd65593c",
1153
+ "model_dtype": "torch.float16",
1154
+ "model_memory_footprint": 167592140,
1155
+ "model_num_parameters": 70426624,
1156
+ "model_is_loaded_in_4bit": null,
1157
+ "model_is_loaded_in_8bit": null,
1158
+ "model_is_quantized": null,
1159
+ "model_device": "cuda:0",
1160
+ "batch_size": 64,
1161
+ "max_length": 2048,
1162
+ "max_ctx_length": 2016,
1163
+ "max_gen_toks": 32
1164
  },
1165
+ "task_model_meta": {
1166
+ "assin2_rte": {
1167
+ "sample_size": 2448,
1168
+ "truncated": 0,
1169
+ "non_truncated": 2448,
1170
+ "padded": 0,
1171
+ "non_padded": 2448,
1172
+ "fewshots_truncated": 0,
1173
+ "mean_seq_length": 1378.0061274509803,
1174
+ "min_seq_length": 1355,
1175
+ "max_seq_length": 1444,
1176
+ "max_ctx_length": 2016,
1177
+ "max_gen_toks": 32,
1178
+ "mean_original_fewshots_size": 15.0,
1179
+ "mean_effective_fewshot_size": 15.0
1180
+ },
1181
+ "assin2_sts": {
1182
+ "sample_size": 2448,
1183
+ "truncated": 0,
1184
+ "non_truncated": 2448,
1185
+ "padded": 0,
1186
+ "non_padded": 2448,
1187
+ "fewshots_truncated": 0,
1188
+ "mean_seq_length": 1514.0061274509803,
1189
+ "min_seq_length": 1491,
1190
+ "max_seq_length": 1580,
1191
+ "max_ctx_length": 2016,
1192
+ "max_gen_toks": 32,
1193
+ "mean_original_fewshots_size": 15.0,
1194
+ "mean_effective_fewshot_size": 15.0
1195
+ },
1196
+ "bluex": {
1197
+ "sample_size": 719,
1198
+ "truncated": 26,
1199
+ "non_truncated": 693,
1200
+ "padded": 0,
1201
+ "non_padded": 719,
1202
+ "fewshots_truncated": 39,
1203
+ "mean_seq_length": 1656.076495132128,
1204
+ "min_seq_length": 1285,
1205
+ "max_seq_length": 2440,
1206
+ "max_ctx_length": 2016,
1207
+ "max_gen_toks": 32,
1208
+ "mean_original_fewshots_size": 3.0,
1209
+ "mean_effective_fewshot_size": 2.945757997218359
1210
+ },
1211
+ "enem_challenge": {
1212
+ "sample_size": 1429,
1213
+ "truncated": 16,
1214
+ "non_truncated": 1413,
1215
+ "padded": 0,
1216
+ "non_padded": 1429,
1217
+ "fewshots_truncated": 17,
1218
+ "mean_seq_length": 1559.0517844646606,
1219
+ "min_seq_length": 1308,
1220
+ "max_seq_length": 2520,
1221
+ "max_ctx_length": 2016,
1222
+ "max_gen_toks": 32,
1223
+ "mean_original_fewshots_size": 3.0,
1224
+ "mean_effective_fewshot_size": 2.988103568929321
1225
+ },
1226
+ "faquad_nli": {
1227
+ "sample_size": 650,
1228
+ "truncated": 0,
1229
+ "non_truncated": 650,
1230
+ "padded": 0,
1231
+ "non_padded": 650,
1232
+ "fewshots_truncated": 0,
1233
+ "mean_seq_length": 1578.8153846153846,
1234
+ "min_seq_length": 1525,
1235
+ "max_seq_length": 1688,
1236
+ "max_ctx_length": 2016,
1237
+ "max_gen_toks": 32,
1238
+ "mean_original_fewshots_size": 15.0,
1239
+ "mean_effective_fewshot_size": 15.0
1240
+ },
1241
+ "hatebr_offensive": {
1242
+ "sample_size": 1400,
1243
+ "truncated": 0,
1244
+ "non_truncated": 1400,
1245
+ "padded": 0,
1246
+ "non_padded": 1400,
1247
+ "fewshots_truncated": 0,
1248
+ "mean_seq_length": 1292.5114285714285,
1249
+ "min_seq_length": 1269,
1250
+ "max_seq_length": 1535,
1251
+ "max_ctx_length": 2016,
1252
+ "max_gen_toks": 32,
1253
+ "mean_original_fewshots_size": 25.0,
1254
+ "mean_effective_fewshot_size": 25.0
1255
+ },
1256
+ "oab_exams": {
1257
+ "sample_size": 2195,
1258
+ "truncated": 0,
1259
+ "non_truncated": 2195,
1260
+ "padded": 0,
1261
+ "non_padded": 2195,
1262
+ "fewshots_truncated": 0,
1263
+ "mean_seq_length": 1340.5503416856493,
1264
+ "min_seq_length": 1077,
1265
+ "max_seq_length": 1805,
1266
+ "max_ctx_length": 2016,
1267
+ "max_gen_toks": 32,
1268
+ "mean_original_fewshots_size": 3.0,
1269
+ "mean_effective_fewshot_size": 3.0
1270
+ },
1271
+ "portuguese_hate_speech": {
1272
+ "sample_size": 851,
1273
+ "truncated": 0,
1274
+ "non_truncated": 851,
1275
+ "padded": 0,
1276
+ "non_padded": 851,
1277
+ "fewshots_truncated": 0,
1278
+ "mean_seq_length": 1775.5558166862515,
1279
+ "min_seq_length": 1741,
1280
+ "max_seq_length": 1812,
1281
+ "max_ctx_length": 2016,
1282
+ "max_gen_toks": 32,
1283
+ "mean_original_fewshots_size": 25.0,
1284
+ "mean_effective_fewshot_size": 25.0
1285
+ },
1286
+ "tweetsentbr": {
1287
+ "sample_size": 2010,
1288
+ "truncated": 0,
1289
+ "non_truncated": 2010,
1290
+ "padded": 0,
1291
+ "non_padded": 2010,
1292
+ "fewshots_truncated": 0,
1293
+ "mean_seq_length": 1495.2800995024875,
1294
+ "min_seq_length": 1475,
1295
+ "max_seq_length": 1554,
1296
+ "max_ctx_length": 2016,
1297
+ "max_gen_toks": 32,
1298
+ "mean_original_fewshots_size": 25.0,
1299
+ "mean_effective_fewshot_size": 25.0
1300
+ }
1301
  },
1302
+ "config": {
1303
+ "model": "huggingface",
1304
+ "model_args": "pretrained=EleutherAI/pythia-70m-deduped,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1305
+ "batch_size": "auto",
1306
+ "batch_sizes": [],
1307
+ "device": null,
1308
+ "use_cache": null,
1309
+ "limit": [
1310
+ null,
1311
+ null,
1312
+ null,
1313
+ null,
1314
+ null,
1315
+ null,
1316
+ null,
1317
+ null,
1318
+ null
1319
+ ],
1320
+ "bootstrap_iters": 0,
1321
+ "gen_kwargs": null
1322
  },
1323
+ "git_hash": null
1324
  }
EleutherAI/pythia-70m-deduped/results_2024-04-03T21-10-06.848681.json CHANGED
@@ -34,29 +34,29 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.19372334069080482,
38
- "all_grouped_npm": -0.24214649119144413,
39
  "all_grouped": {
40
  "enem_challenge": 0.172148355493352,
41
  "bluex": 0.1835883171070932,
42
  "oab_exams": 0.2041002277904328,
43
- "assin2_rte": 0.23382263963539596,
44
  "assin2_sts": 0.02026922309956098,
45
- "faquad_nli": 0.2759039805530234,
46
- "hatebr_offensive": 0.28076386043861,
47
- "portuguese_hate_speech": 0.24182579976211263,
48
- "tweetsentbr": 0.13108766233766234
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.172148355493352,
52
  "harness|bluex|bluex|None|3": 0.1835883171070932,
53
  "harness|oab_exams|oab_exams|None|3": 0.2041002277904328,
54
- "harness|assin2_rte|assin2_rte|None|15": 0.23382263963539596,
55
  "harness|assin2_sts|assin2_sts|None|15": 0.02026922309956098,
56
- "harness|faquad_nli|faquad_nli|None|15": 0.2759039805530234,
57
- "harness|hatebr_offensive|hatebr_offensive|None|25": 0.28076386043861,
58
- "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.24182579976211263,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.13108766233766234
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.172148355493352,
@@ -125,9 +125,9 @@
125
  "main_score": 0.2041002277904328
126
  },
127
  "harness|assin2_rte|assin2_rte|None|15": {
128
- "f1_macro,all": 0.23382263963539596,
129
  "acc,all": 0.5053104575163399,
130
- "main_score": 0.23382263963539596
131
  },
132
  "harness|assin2_sts|assin2_sts|None|15": {
133
  "pearson,all": 0.02026922309956098,
@@ -135,24 +135,24 @@
135
  "main_score": 0.02026922309956098
136
  },
137
  "harness|faquad_nli|faquad_nli|None|15": {
138
- "f1_macro,all": 0.2759039805530234,
139
  "acc,all": 0.6984615384615385,
140
- "main_score": 0.2759039805530234
141
  },
142
  "harness|hatebr_offensive|hatebr_offensive|None|25": {
143
- "f1_macro,all": 0.28076386043861,
144
  "acc,all": 0.48857142857142855,
145
- "main_score": 0.28076386043861
146
  },
147
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": {
148
- "f1_macro,all": 0.24182579976211263,
149
  "acc,all": 0.36662749706227965,
150
- "main_score": 0.24182579976211263
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.13108766233766234,
154
  "acc,all": 0.29502487562189056,
155
- "main_score": 0.13108766233766234
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.25592934376196685,
38
+ "all_grouped_npm": -0.12378239539395361,
39
  "all_grouped": {
40
  "enem_challenge": 0.172148355493352,
41
  "bluex": 0.1835883171070932,
42
  "oab_exams": 0.2041002277904328,
43
+ "assin2_rte": 0.35073395945309394,
44
  "assin2_sts": 0.02026922309956098,
45
+ "faquad_nli": 0.4138559708295351,
46
+ "hatebr_offensive": 0.421145790657915,
47
+ "portuguese_hate_speech": 0.3627386996431689,
48
+ "tweetsentbr": 0.1747835497835498
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.172148355493352,
52
  "harness|bluex|bluex|None|3": 0.1835883171070932,
53
  "harness|oab_exams|oab_exams|None|3": 0.2041002277904328,
54
+ "harness|assin2_rte|assin2_rte|None|15": 0.35073395945309394,
55
  "harness|assin2_sts|assin2_sts|None|15": 0.02026922309956098,
56
+ "harness|faquad_nli|faquad_nli|None|15": 0.4138559708295351,
57
+ "harness|hatebr_offensive|hatebr_offensive|None|25": 0.421145790657915,
58
+ "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.3627386996431689,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.1747835497835498
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.172148355493352,
 
125
  "main_score": 0.2041002277904328
126
  },
127
  "harness|assin2_rte|assin2_rte|None|15": {
128
+ "f1_macro,all": 0.35073395945309394,
129
  "acc,all": 0.5053104575163399,
130
+ "main_score": 0.35073395945309394
131
  },
132
  "harness|assin2_sts|assin2_sts|None|15": {
133
  "pearson,all": 0.02026922309956098,
 
135
  "main_score": 0.02026922309956098
136
  },
137
  "harness|faquad_nli|faquad_nli|None|15": {
138
+ "f1_macro,all": 0.4138559708295351,
139
  "acc,all": 0.6984615384615385,
140
+ "main_score": 0.4138559708295351
141
  },
142
  "harness|hatebr_offensive|hatebr_offensive|None|25": {
143
+ "f1_macro,all": 0.421145790657915,
144
  "acc,all": 0.48857142857142855,
145
+ "main_score": 0.421145790657915
146
  },
147
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": {
148
+ "f1_macro,all": 0.3627386996431689,
149
  "acc,all": 0.36662749706227965,
150
+ "main_score": 0.3627386996431689
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.1747835497835498,
154
  "acc,all": 0.29502487562189056,
155
+ "main_score": 0.1747835497835498
156
  }
157
  },
158
  "config_tasks": {
EleutherAI/pythia-70m/raw_2024-04-24T21-25-37.361813/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.4502521949740358,
5
- "acc,all": 0.5171568627450981,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.006173005990956128,
10
- "mse,all": 2.2616870915032687,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.2086230876216968,
15
- "acc,exam_id__UNICAMP_2020": 0.2,
16
- "acc,exam_id__UNICAMP_2023": 0.37209302325581395,
17
- "acc,exam_id__UNICAMP_2021_1": 0.2826086956521739,
18
- "acc,exam_id__UNICAMP_2022": 0.28205128205128205,
19
- "acc,exam_id__USP_2018": 0.07407407407407407,
20
- "acc,exam_id__USP_2019": 0.15,
21
- "acc,exam_id__USP_2020": 0.21428571428571427,
22
- "acc,exam_id__USP_2023": 0.09090909090909091,
23
- "acc,exam_id__USP_2024": 0.12195121951219512,
24
- "acc,exam_id__UNICAMP_2021_2": 0.3137254901960784,
25
- "acc,exam_id__UNICAMP_2018": 0.3148148148148148,
26
- "acc,exam_id__UNICAMP_2019": 0.22,
27
- "acc,exam_id__USP_2021": 0.17307692307692307,
28
- "acc,exam_id__UNICAMP_2024": 0.2222222222222222,
29
- "acc,exam_id__USP_2022": 0.10204081632653061,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.0622813156053184,
35
- "acc,exam_id__2015": 0.058823529411764705,
36
- "acc,exam_id__2017": 0.10344827586206896,
37
- "acc,exam_id__2009": 0.034782608695652174,
38
- "acc,exam_id__2010": 0.05982905982905983,
39
- "acc,exam_id__2011": 0.042735042735042736,
40
- "acc,exam_id__2023": 0.05925925925925926,
41
- "acc,exam_id__2014": 0.03669724770642202,
42
- "acc,exam_id__2016_2": 0.04878048780487805,
43
- "acc,exam_id__2022": 0.06766917293233082,
44
- "acc,exam_id__2012": 0.13793103448275862,
45
- "acc,exam_id__2016": 0.06611570247933884,
46
- "acc,exam_id__2013": 0.027777777777777776
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.4396551724137931,
50
- "acc,all": 0.7846153846153846,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.419144092439547,
56
- "acc,all": 0.5042857142857143
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.030068337129840545,
60
- "acc,exam_id__2016-21": 0.025,
61
- "acc,exam_id__2013-10": 0.0125,
62
- "acc,exam_id__2017-22": 0.0625,
63
- "acc,exam_id__2017-23": 0.0125,
64
- "acc,exam_id__2010-01": 0.011764705882352941,
65
- "acc,exam_id__2011-04": 0.025,
66
- "acc,exam_id__2012-06a": 0.025,
67
- "acc,exam_id__2013-12": 0.0125,
68
- "acc,exam_id__2016-20a": 0.025,
69
- "acc,exam_id__2014-15": 0.01282051282051282,
70
- "acc,exam_id__2014-14": 0.0125,
71
- "acc,exam_id__2011-03": 0.04040404040404041,
72
- "acc,exam_id__2016-20": 0.025,
73
- "acc,exam_id__2012-09": 0.03896103896103896,
74
- "acc,exam_id__2011-05": 0.0625,
75
- "acc,exam_id__2010-02": 0.03,
76
- "acc,exam_id__2018-25": 0.025,
77
- "acc,exam_id__2012-06": 0.0625,
78
- "acc,exam_id__2017-24": 0.05,
79
- "acc,exam_id__2012-07": 0.025,
80
- "acc,exam_id__2015-17": 0.05128205128205128,
81
- "acc,exam_id__2012-08": 0.025,
82
- "acc,exam_id__2015-18": 0.0375,
83
- "acc,exam_id__2016-19": 0.02564102564102564,
84
- "acc,exam_id__2015-16": 0.025,
85
- "acc,exam_id__2014-13": 0.025,
86
- "acc,exam_id__2013-11": 0.025,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.3087375175771073,
92
- "acc,all": 0.33137485311398357
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.12087469376644588,
96
- "acc,all": 0.2945273631840796,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
208
  },
209
- {
210
- "function": "take_first"
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7ff221d8ff60>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7ff221d8f600>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
414
  },
415
- {
416
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7ff221d8f880>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7ff221d8f060>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
503
  },
504
- {
505
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7ff221d8f380>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
624
  },
625
- {
626
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7ff221d8eca0>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
839
  },
840
- {
841
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7ff221d8ef20>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
963
  },
964
- {
965
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
  },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
- },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
  },
1028
- {
1029
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1030
  }
1031
- ]
1032
  }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
- }
1038
- }
1039
- },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
1050
- },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
1061
- },
1062
- "model_meta": {
1063
- "truncated": 42,
1064
- "non_truncated": 14108,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 56,
1068
- "has_chat_template": false,
1069
- "chat_type": null,
1070
- "n_gpus": 1,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "a39f36b100fe8a5377810d56c3f4789b9c53ac42",
1073
- "model_dtype": "torch.float16",
1074
- "model_memory_footprint": 167592140,
1075
- "model_num_parameters": 70426624,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 32,
1081
- "max_length": 2048,
1082
- "max_ctx_length": 2016,
1083
- "max_gen_toks": 32
1084
- },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1378.0061274509803,
1094
- "min_seq_length": 1355,
1095
- "max_seq_length": 1444,
1096
- "max_ctx_length": 2016,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1514.0061274509803,
1109
- "min_seq_length": 1491,
1110
- "max_seq_length": 1580,
1111
- "max_ctx_length": 2016,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
  },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 26,
1119
- "non_truncated": 693,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 39,
1123
- "mean_seq_length": 1656.076495132128,
1124
- "min_seq_length": 1285,
1125
- "max_seq_length": 2440,
1126
- "max_ctx_length": 2016,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 2.945757997218359
1130
  },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 16,
1134
- "non_truncated": 1413,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 17,
1138
- "mean_seq_length": 1559.0517844646606,
1139
- "min_seq_length": 1308,
1140
- "max_seq_length": 2520,
1141
- "max_ctx_length": 2016,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 2.988103568929321
1145
  },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1578.8153846153846,
1154
- "min_seq_length": 1525,
1155
- "max_seq_length": 1688,
1156
- "max_ctx_length": 2016,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
 
 
 
 
 
 
 
 
1160
  },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1292.5114285714285,
1169
- "min_seq_length": 1269,
1170
- "max_seq_length": 1535,
1171
- "max_ctx_length": 2016,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 0,
1179
- "non_truncated": 2195,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 0,
1183
- "mean_seq_length": 1340.5503416856493,
1184
- "min_seq_length": 1077,
1185
- "max_seq_length": 1805,
1186
- "max_ctx_length": 2016,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 3.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1190
  },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 0,
1194
- "non_truncated": 851,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 0,
1198
- "mean_seq_length": 1775.5558166862515,
1199
- "min_seq_length": 1741,
1200
- "max_seq_length": 1812,
1201
- "max_ctx_length": 2016,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 25.0
 
 
 
 
 
 
1205
  },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1495.2800995024875,
1214
- "min_seq_length": 1475,
1215
- "max_seq_length": 1554,
1216
- "max_ctx_length": 2016,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
- },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=EleutherAI/pythia-70m,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
- },
1243
- "git_hash": "51e0e5e"
1244
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.4502521949740358,
5
+ "acc,all": 0.5171568627450981,
6
+ "alias": "assin2_rte"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.006173005990956128,
10
+ "mse,all": 2.2616870915032687,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.2086230876216968,
15
+ "acc,exam_id__UNICAMP_2020": 0.2,
16
+ "acc,exam_id__UNICAMP_2023": 0.37209302325581395,
17
+ "acc,exam_id__UNICAMP_2021_1": 0.2826086956521739,
18
+ "acc,exam_id__UNICAMP_2022": 0.28205128205128205,
19
+ "acc,exam_id__USP_2018": 0.07407407407407407,
20
+ "acc,exam_id__USP_2019": 0.15,
21
+ "acc,exam_id__USP_2020": 0.21428571428571427,
22
+ "acc,exam_id__USP_2023": 0.09090909090909091,
23
+ "acc,exam_id__USP_2024": 0.12195121951219512,
24
+ "acc,exam_id__UNICAMP_2021_2": 0.3137254901960784,
25
+ "acc,exam_id__UNICAMP_2018": 0.3148148148148148,
26
+ "acc,exam_id__UNICAMP_2019": 0.22,
27
+ "acc,exam_id__USP_2021": 0.17307692307692307,
28
+ "acc,exam_id__UNICAMP_2024": 0.2222222222222222,
29
+ "acc,exam_id__USP_2022": 0.10204081632653061,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.0622813156053184,
35
+ "acc,exam_id__2015": 0.058823529411764705,
36
+ "acc,exam_id__2017": 0.10344827586206896,
37
+ "acc,exam_id__2009": 0.034782608695652174,
38
+ "acc,exam_id__2010": 0.05982905982905983,
39
+ "acc,exam_id__2011": 0.042735042735042736,
40
+ "acc,exam_id__2023": 0.05925925925925926,
41
+ "acc,exam_id__2014": 0.03669724770642202,
42
+ "acc,exam_id__2016_2": 0.04878048780487805,
43
+ "acc,exam_id__2022": 0.06766917293233082,
44
+ "acc,exam_id__2012": 0.13793103448275862,
45
+ "acc,exam_id__2016": 0.06611570247933884,
46
+ "acc,exam_id__2013": 0.027777777777777776
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.4396551724137931,
50
+ "acc,all": 0.7846153846153846,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.419144092439547,
56
+ "acc,all": 0.5042857142857143
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.030068337129840545,
60
+ "acc,exam_id__2016-21": 0.025,
61
+ "acc,exam_id__2013-10": 0.0125,
62
+ "acc,exam_id__2017-22": 0.0625,
63
+ "acc,exam_id__2017-23": 0.0125,
64
+ "acc,exam_id__2010-01": 0.011764705882352941,
65
+ "acc,exam_id__2011-04": 0.025,
66
+ "acc,exam_id__2012-06a": 0.025,
67
+ "acc,exam_id__2013-12": 0.0125,
68
+ "acc,exam_id__2016-20a": 0.025,
69
+ "acc,exam_id__2014-15": 0.01282051282051282,
70
+ "acc,exam_id__2014-14": 0.0125,
71
+ "acc,exam_id__2011-03": 0.04040404040404041,
72
+ "acc,exam_id__2016-20": 0.025,
73
+ "acc,exam_id__2012-09": 0.03896103896103896,
74
+ "acc,exam_id__2011-05": 0.0625,
75
+ "acc,exam_id__2010-02": 0.03,
76
+ "acc,exam_id__2018-25": 0.025,
77
+ "acc,exam_id__2012-06": 0.0625,
78
+ "acc,exam_id__2017-24": 0.05,
79
+ "acc,exam_id__2012-07": 0.025,
80
+ "acc,exam_id__2015-17": 0.05128205128205128,
81
+ "acc,exam_id__2012-08": 0.025,
82
+ "acc,exam_id__2015-18": 0.0375,
83
+ "acc,exam_id__2016-19": 0.02564102564102564,
84
+ "acc,exam_id__2015-16": 0.025,
85
+ "acc,exam_id__2014-13": 0.025,
86
+ "acc,exam_id__2013-11": 0.025,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.3087375175771073,
92
+ "acc,all": 0.33137485311398357
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.16116625835526113,
96
+ "acc,all": 0.2945273631840796,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7ff221d8ff60>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7ff221d8f600>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7ff221d8f880>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7ff221d8f060>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7ff221d8f380>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7ff221d8eca0>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7ff221d8ef20>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
990
  },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
+ },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
+ },
1028
+ {
1029
+ "function": "take_first"
1030
+ }
1031
+ ]
1032
+ }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
  }
 
1038
  }
1039
  },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
 
 
 
 
1050
  },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
 
 
 
 
1061
  },
1062
+ "model_meta": {
1063
+ "truncated": 42,
1064
+ "non_truncated": 14108,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 56,
1068
+ "has_chat_template": false,
1069
+ "chat_type": null,
1070
+ "n_gpus": 1,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "a39f36b100fe8a5377810d56c3f4789b9c53ac42",
1073
+ "model_dtype": "torch.float16",
1074
+ "model_memory_footprint": 167592140,
1075
+ "model_num_parameters": 70426624,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 32,
1081
+ "max_length": 2048,
1082
+ "max_ctx_length": 2016,
1083
+ "max_gen_toks": 32
1084
  },
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1378.0061274509803,
1094
+ "min_seq_length": 1355,
1095
+ "max_seq_length": 1444,
1096
+ "max_ctx_length": 2016,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
+ },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1514.0061274509803,
1109
+ "min_seq_length": 1491,
1110
+ "max_seq_length": 1580,
1111
+ "max_ctx_length": 2016,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
+ },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 26,
1119
+ "non_truncated": 693,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 39,
1123
+ "mean_seq_length": 1656.076495132128,
1124
+ "min_seq_length": 1285,
1125
+ "max_seq_length": 2440,
1126
+ "max_ctx_length": 2016,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 2.945757997218359
1130
+ },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 16,
1134
+ "non_truncated": 1413,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 17,
1138
+ "mean_seq_length": 1559.0517844646606,
1139
+ "min_seq_length": 1308,
1140
+ "max_seq_length": 2520,
1141
+ "max_ctx_length": 2016,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 2.988103568929321
1145
+ },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1578.8153846153846,
1154
+ "min_seq_length": 1525,
1155
+ "max_seq_length": 1688,
1156
+ "max_ctx_length": 2016,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1292.5114285714285,
1169
+ "min_seq_length": 1269,
1170
+ "max_seq_length": 1535,
1171
+ "max_ctx_length": 2016,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 0,
1179
+ "non_truncated": 2195,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 0,
1183
+ "mean_seq_length": 1340.5503416856493,
1184
+ "min_seq_length": 1077,
1185
+ "max_seq_length": 1805,
1186
+ "max_ctx_length": 2016,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 3.0
1190
+ },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 0,
1194
+ "non_truncated": 851,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 0,
1198
+ "mean_seq_length": 1775.5558166862515,
1199
+ "min_seq_length": 1741,
1200
+ "max_seq_length": 1812,
1201
+ "max_ctx_length": 2016,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 25.0
1205
+ },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1495.2800995024875,
1214
+ "min_seq_length": 1475,
1215
+ "max_seq_length": 1554,
1216
+ "max_ctx_length": 2016,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
  },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=EleutherAI/pythia-70m,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
  },
1243
+ "git_hash": "51e0e5e"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1244
  }
EleutherAI/pythia-70m/results_2024-04-24T21-25-37.361813.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.2273121575020823,
38
- "all_grouped_npm": -0.15629152255175874,
39
  "all_grouped": {
40
  "enem_challenge": 0.0622813156053184,
41
  "bluex": 0.2086230876216968,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.4396551724137931,
46
  "hatebr_offensive": 0.419144092439547,
47
  "portuguese_hate_speech": 0.3087375175771073,
48
- "tweetsentbr": 0.12087469376644588
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.0622813156053184,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.4396551724137931,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.419144092439547,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.3087375175771073,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.12087469376644588
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.0622813156053184,
@@ -150,9 +150,9 @@
150
  "main_score": 0.3087375175771073
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.12087469376644588,
154
  "acc,all": 0.2945273631840796,
155
- "main_score": 0.12087469376644588
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.2317889980119507,
38
+ "all_grouped_npm": -0.14962955750731177,
39
  "all_grouped": {
40
  "enem_challenge": 0.0622813156053184,
41
  "bluex": 0.2086230876216968,
 
45
  "faquad_nli": 0.4396551724137931,
46
  "hatebr_offensive": 0.419144092439547,
47
  "portuguese_hate_speech": 0.3087375175771073,
48
+ "tweetsentbr": 0.16116625835526113
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.0622813156053184,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.4396551724137931,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.419144092439547,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.3087375175771073,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.16116625835526113
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.0622813156053184,
 
150
  "main_score": 0.3087375175771073
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.16116625835526113,
154
  "acc,all": 0.2945273631840796,
155
+ "main_score": 0.16116625835526113
156
  }
157
  },
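A quick consistency check on the figures above (illustrative Python, with the values copied from this file's old and new hunks): the shift in "all_grouped_average" equals the tweetsentbr f1_macro correction spread over the nine grouped tasks, consistent with no other task score changing.

```python
# Values taken from the old (-) and new (+) lines of this results file.
old_avg, new_avg = 0.2273121575020823, 0.2317889980119507
old_f1,  new_f1  = 0.12087469376644588, 0.16116625835526113

# The grouped average is a plain mean over 9 tasks, so its change should be
# the single corrected score's change divided by 9.
assert abs((new_avg - old_avg) - (new_f1 - old_f1) / 9) < 1e-12
print("all_grouped_average shift is consistent with the tweetsentbr fix")
```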
158
  "config_tasks": {
FuseAI/FuseChat-7B-VaRM/raw_2024-03-08T15-26-39.517660/results.json CHANGED
@@ -1,1324 +1,1324 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.9272868051476477,
5
- "acc,all": 0.9272875816993464,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.7836651113903375,
10
- "mse,all": 0.415110294117647,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.5493741307371349,
15
- "acc,exam_id__UNICAMP_2022": 0.6153846153846154,
16
- "acc,exam_id__UNICAMP_2024": 0.5555555555555556,
17
- "acc,exam_id__USP_2024": 0.6341463414634146,
18
- "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764,
19
- "acc,exam_id__USP_2021": 0.5769230769230769,
20
- "acc,exam_id__UNICAMP_2020": 0.5272727272727272,
21
- "acc,exam_id__USP_2019": 0.45,
22
- "acc,exam_id__USP_2020": 0.5178571428571429,
23
- "acc,exam_id__UNICAMP_2019": 0.54,
24
- "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174,
25
- "acc,exam_id__UNICAMP_2023": 0.5581395348837209,
26
- "acc,exam_id__USP_2018": 0.5370370370370371,
27
- "acc,exam_id__USP_2022": 0.6122448979591837,
28
- "acc,exam_id__USP_2023": 0.5909090909090909,
29
- "acc,exam_id__UNICAMP_2018": 0.5925925925925926,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.6480055983205039,
35
- "acc,exam_id__2011": 0.7008547008547008,
36
- "acc,exam_id__2009": 0.6521739130434783,
37
- "acc,exam_id__2012": 0.646551724137931,
38
- "acc,exam_id__2016": 0.5950413223140496,
39
- "acc,exam_id__2017": 0.6551724137931034,
40
- "acc,exam_id__2022": 0.6090225563909775,
41
- "acc,exam_id__2023": 0.6666666666666666,
42
- "acc,exam_id__2013": 0.6759259259259259,
43
- "acc,exam_id__2015": 0.6302521008403361,
44
- "acc,exam_id__2010": 0.6495726495726496,
45
- "acc,exam_id__2016_2": 0.6422764227642277,
46
- "acc,exam_id__2014": 0.6605504587155964
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.787259111855886,
50
- "acc,all": 0.8461538461538461,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.8223021238433512,
56
- "acc,all": 0.8257142857142857
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.4182232346241458,
60
- "acc,exam_id__2017-23": 0.4125,
61
- "acc,exam_id__2010-02": 0.42,
62
- "acc,exam_id__2012-06": 0.4375,
63
- "acc,exam_id__2016-20a": 0.4,
64
- "acc,exam_id__2018-25": 0.4,
65
- "acc,exam_id__2011-05": 0.475,
66
- "acc,exam_id__2013-10": 0.3625,
67
- "acc,exam_id__2015-16": 0.4375,
68
- "acc,exam_id__2013-12": 0.4625,
69
- "acc,exam_id__2016-21": 0.3875,
70
- "acc,exam_id__2011-04": 0.325,
71
- "acc,exam_id__2010-01": 0.35294117647058826,
72
- "acc,exam_id__2012-07": 0.4,
73
- "acc,exam_id__2012-08": 0.4125,
74
- "acc,exam_id__2014-13": 0.325,
75
- "acc,exam_id__2013-11": 0.4625,
76
- "acc,exam_id__2015-18": 0.45,
77
- "acc,exam_id__2016-19": 0.47435897435897434,
78
- "acc,exam_id__2014-14": 0.5,
79
- "acc,exam_id__2012-09": 0.4155844155844156,
80
- "acc,exam_id__2012-06a": 0.4125,
81
- "acc,exam_id__2017-22": 0.425,
82
- "acc,exam_id__2014-15": 0.44871794871794873,
83
- "acc,exam_id__2015-17": 0.5512820512820513,
84
- "acc,exam_id__2017-24": 0.4625,
85
- "acc,exam_id__2016-20": 0.375,
86
- "acc,exam_id__2011-03": 0.3333333333333333,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.6973371097488426,
92
- "acc,all": 0.7414806110458284
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.44067858320963216,
96
- "acc,all": 0.6666666666666666,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
 
 
 
 
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
 
 
 
 
208
  },
209
- {
210
- "function": "take_first"
 
 
 
 
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7f116cc853a0>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
 
 
 
 
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7f116cc84d60>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
 
 
 
 
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
 
 
414
  },
415
- {
416
- "function": "take_first"
 
 
 
 
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f116cc84fe0>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7f116cc85580>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
 
 
503
  },
504
- {
505
- "function": "take_first"
 
 
 
 
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f116cc85800>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
 
 
 
 
624
  },
625
- {
626
- "function": "take_first"
 
 
 
 
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
 
 
 
 
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7f116cc84720>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
 
 
 
 
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
 
 
 
839
  },
840
- {
841
- "function": "take_first"
 
 
 
 
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7f116cc849a0>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
 
 
 
 
963
  },
964
- {
965
- "function": "take_first"
 
 
 
 
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia-temp/tweetsentbr",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "id_sampler",
990
- "sampler_config": {
991
- "id_list": [
992
- "862006098672459776",
993
- "861612241703063552",
994
- "861833257087848448",
995
- "861283345476571138",
996
- "861283000335695873",
997
- "862139461274152962",
998
- "862139468702265344",
999
- "862006107702734848",
1000
- "862004354458537984",
1001
- "861833322925883392",
1002
- "861603063190171648",
1003
- "862139462716989440",
1004
- "862005877355810818",
1005
- "861751885862244353",
1006
- "862045180261695489",
1007
- "862004252499226630",
1008
- "862023970828292097",
1009
- "862041752127107074",
1010
- "862034961863503872",
1011
- "861293756548608001",
1012
- "861993527575695360",
1013
- "862003099355021315",
1014
- "862002404086206467",
1015
- "861282989602463744",
1016
- "862139454399668229",
1017
- "862139463769743361",
1018
- "862054906689138688",
1019
- "862139446535360513",
1020
- "861997363744911361",
1021
- "862057988898648065",
1022
- "861329080083521536",
1023
- "861286289034838016",
1024
- "861833050526806017",
1025
- "861300658565255169",
1026
- "861989003821813760",
1027
- "861682750398631938",
1028
- "861283275716907008",
1029
- "861283402523267072",
1030
- "861873108147466240",
1031
- "862139462138171392",
1032
- "861284090271715333",
1033
- "862139446149427201",
1034
- "861629109331525633",
1035
- "861721698609098753",
1036
- "862139453124612096",
1037
- "861283339482914816",
1038
- "861282466291748867",
1039
- "862055346759749632",
1040
- "862003019860389891",
1041
- "862140698346344449",
1042
- "862084376280092672",
1043
- "862003058708017152",
1044
- "862000677345787904",
1045
- "862029129310502913",
1046
- "862005822376882178",
1047
- "861969836297134085",
1048
- "861302955361927168",
1049
- "862064949451005953",
1050
- "861282589541355520",
1051
- "862005476858486784",
1052
- "862004684411850757",
1053
- "862139471101349890",
1054
- "862139467146170368",
1055
- "862139475098558465",
1056
- "862140706550403072",
1057
- "861282777001537536",
1058
- "862003184147079169",
1059
- "861283410656059394",
1060
- "861283417857691649",
1061
- "861888778922856448",
1062
- "861655860812099585",
1063
- "861834248063504384",
1064
- "862005210935382017",
1065
- "861282716930760704",
1066
- "861287082433622022"
1067
- ],
1068
- "id_column": "id"
1069
- }
1070
- },
1071
- "num_fewshot": 25,
1072
- "metric_list": [
1073
- {
1074
- "metric": "f1_macro",
1075
- "aggregation": "f1_macro",
1076
- "higher_is_better": true
1077
  },
1078
- {
1079
- "metric": "acc",
1080
- "aggregation": "acc",
1081
- "higher_is_better": true
1082
- }
1083
- ],
1084
- "output_type": "generate_until",
1085
- "generation_kwargs": {
1086
- "max_gen_toks": 32,
1087
- "do_sample": false,
1088
- "temperature": 0.0,
1089
- "top_k": null,
1090
- "top_p": null,
1091
- "until": [
1092
- "\n\n"
1093
- ]
1094
- },
1095
- "repeats": 1,
1096
- "filter_list": [
1097
- {
1098
- "name": "all",
1099
- "filter": [
1100
- {
1101
- "function": "find_similar_label",
1102
- "labels": [
1103
- "Positivo",
1104
- "Neutro",
1105
- "Negativo"
1106
- ]
 
 
 
 
1107
  },
1108
- {
1109
- "function": "take_first"
 
 
 
 
1110
  }
1111
- ]
1112
  }
1113
- ],
1114
- "should_decontaminate": false,
1115
- "metadata": {
1116
- "version": 1.0
1117
- }
1118
- }
1119
- },
1120
- "versions": {
1121
- "assin2_rte": 1.1,
1122
- "assin2_sts": 1.1,
1123
- "bluex": 1.1,
1124
- "enem_challenge": 1.1,
1125
- "faquad_nli": 1.1,
1126
- "hatebr_offensive": 1.0,
1127
- "oab_exams": 1.5,
1128
- "portuguese_hate_speech": 1.0,
1129
- "tweetsentbr": 1.0
1130
- },
1131
- "n-shot": {
1132
- "assin2_rte": 15,
1133
- "assin2_sts": 15,
1134
- "bluex": 3,
1135
- "enem_challenge": 3,
1136
- "faquad_nli": 15,
1137
- "hatebr_offensive": 25,
1138
- "oab_exams": 3,
1139
- "portuguese_hate_speech": 25,
1140
- "tweetsentbr": 25
1141
- },
1142
- "model_meta": {
1143
- "truncated": 4,
1144
- "non_truncated": 14146,
1145
- "padded": 0,
1146
- "non_padded": 14150,
1147
- "fewshots_truncated": 4,
1148
- "has_chat_template": true,
1149
- "chat_type": "system_user_assistant",
1150
- "n_gpus": 1,
1151
- "accelerate_num_process": null,
1152
- "model_sha": "cda1eebe5ba3912c900045ed7847600e29b22c64",
1153
- "model_dtype": "torch.bfloat16",
1154
- "model_memory_footprint": 14617722880,
1155
- "model_num_parameters": 7241748480,
1156
- "model_is_loaded_in_4bit": null,
1157
- "model_is_loaded_in_8bit": null,
1158
- "model_is_quantized": null,
1159
- "model_device": "cuda:1",
1160
- "batch_size": 16,
1161
- "max_length": 2560,
1162
- "max_ctx_length": 2528,
1163
- "max_gen_toks": 32
1164
- },
1165
- "task_model_meta": {
1166
- "assin2_rte": {
1167
- "sample_size": 2448,
1168
- "truncated": 0,
1169
- "non_truncated": 2448,
1170
- "padded": 0,
1171
- "non_padded": 2448,
1172
- "fewshots_truncated": 0,
1173
- "mean_seq_length": 1584.7455065359477,
1174
- "min_seq_length": 1561,
1175
- "max_seq_length": 1651,
1176
- "max_ctx_length": 2528,
1177
- "max_gen_toks": 32,
1178
- "mean_original_fewshots_size": 15.0,
1179
- "mean_effective_fewshot_size": 15.0
1180
- },
1181
- "assin2_sts": {
1182
- "sample_size": 2448,
1183
- "truncated": 0,
1184
- "non_truncated": 2448,
1185
- "padded": 0,
1186
- "non_padded": 2448,
1187
- "fewshots_truncated": 0,
1188
- "mean_seq_length": 1824.7455065359477,
1189
- "min_seq_length": 1801,
1190
- "max_seq_length": 1891,
1191
- "max_ctx_length": 2528,
1192
- "max_gen_toks": 32,
1193
- "mean_original_fewshots_size": 15.0,
1194
- "mean_effective_fewshot_size": 15.0
1195
- },
1196
- "bluex": {
1197
- "sample_size": 719,
1198
- "truncated": 2,
1199
- "non_truncated": 717,
1200
- "padded": 0,
1201
- "non_padded": 719,
1202
- "fewshots_truncated": 2,
1203
- "mean_seq_length": 1782.9262865090404,
1204
- "min_seq_length": 1406,
1205
- "max_seq_length": 2583,
1206
- "max_ctx_length": 2528,
1207
- "max_gen_toks": 32,
1208
- "mean_original_fewshots_size": 3.0,
1209
- "mean_effective_fewshot_size": 2.9972183588317107
1210
  },
1211
- "enem_challenge": {
1212
- "sample_size": 1429,
1213
- "truncated": 2,
1214
- "non_truncated": 1427,
1215
- "padded": 0,
1216
- "non_padded": 1429,
1217
- "fewshots_truncated": 2,
1218
- "mean_seq_length": 1683.039188243527,
1219
- "min_seq_length": 1417,
1220
- "max_seq_length": 2681,
1221
- "max_ctx_length": 2528,
1222
- "max_gen_toks": 32,
1223
- "mean_original_fewshots_size": 3.0,
1224
- "mean_effective_fewshot_size": 2.998600419874038
1225
  },
1226
- "faquad_nli": {
1227
- "sample_size": 650,
1228
- "truncated": 0,
1229
- "non_truncated": 650,
1230
- "padded": 0,
1231
- "non_padded": 650,
1232
- "fewshots_truncated": 0,
1233
- "mean_seq_length": 1825.9876923076922,
1234
- "min_seq_length": 1770,
1235
- "max_seq_length": 1946,
1236
- "max_ctx_length": 2528,
1237
- "max_gen_toks": 32,
1238
- "mean_original_fewshots_size": 15.0,
1239
- "mean_effective_fewshot_size": 15.0
1240
  },
1241
- "hatebr_offensive": {
1242
- "sample_size": 1400,
1243
- "truncated": 0,
1244
- "non_truncated": 1400,
1245
- "padded": 0,
1246
- "non_padded": 1400,
1247
- "fewshots_truncated": 0,
1248
- "mean_seq_length": 1676.3878571428572,
1249
- "min_seq_length": 1653,
1250
- "max_seq_length": 1927,
1251
- "max_ctx_length": 2528,
1252
- "max_gen_toks": 32,
1253
- "mean_original_fewshots_size": 25.0,
1254
- "mean_effective_fewshot_size": 25.0
 
 
 
 
 
 
 
 
1255
  },
1256
- "oab_exams": {
1257
- "sample_size": 2195,
1258
- "truncated": 0,
1259
- "non_truncated": 2195,
1260
- "padded": 0,
1261
- "non_padded": 2195,
1262
- "fewshots_truncated": 0,
1263
- "mean_seq_length": 1428.764464692483,
1264
- "min_seq_length": 1162,
1265
- "max_seq_length": 1931,
1266
- "max_ctx_length": 2528,
1267
- "max_gen_toks": 32,
1268
- "mean_original_fewshots_size": 3.0,
1269
- "mean_effective_fewshot_size": 3.0
 
 
 
 
1270
  },
1271
- "portuguese_hate_speech": {
1272
- "sample_size": 851,
1273
- "truncated": 0,
1274
- "non_truncated": 851,
1275
- "padded": 0,
1276
- "non_padded": 851,
1277
- "fewshots_truncated": 0,
1278
- "mean_seq_length": 2177.3360752056406,
1279
- "min_seq_length": 2142,
1280
- "max_seq_length": 2216,
1281
- "max_ctx_length": 2528,
1282
- "max_gen_toks": 32,
1283
- "mean_original_fewshots_size": 25.0,
1284
- "mean_effective_fewshot_size": 25.0
 
 
 
 
 
 
1285
  },
1286
- "tweetsentbr": {
1287
- "sample_size": 2010,
1288
- "truncated": 0,
1289
- "non_truncated": 2010,
1290
- "padded": 0,
1291
- "non_padded": 2010,
1292
- "fewshots_truncated": 0,
1293
- "mean_seq_length": 1923.2492537313433,
1294
- "min_seq_length": 1902,
1295
- "max_seq_length": 2018,
1296
- "max_ctx_length": 2528,
1297
- "max_gen_toks": 32,
1298
- "mean_original_fewshots_size": 25.0,
1299
- "mean_effective_fewshot_size": 25.0
1300
- }
1301
- },
1302
- "config": {
1303
- "model": "huggingface",
1304
- "model_args": "pretrained=FuseAI/FuseChat-7B-VaRM,dtype=bfloat16,device=cuda:1,revision=main,trust_remote_code=True,starting_max_length=2560",
1305
- "batch_size": "auto",
1306
- "batch_sizes": [],
1307
- "device": null,
1308
- "use_cache": null,
1309
- "limit": [
1310
- null,
1311
- null,
1312
- null,
1313
- null,
1314
- null,
1315
- null,
1316
- null,
1317
- null,
1318
- null
1319
- ],
1320
- "bootstrap_iters": 0,
1321
- "gen_kwargs": null
1322
- },
1323
- "git_hash": null
1324
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.9272868051476477,
5
+ "acc,all": 0.9272875816993464,
6
+ "alias": "assin2_rte"
 
 
 
 
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.7836651113903375,
10
+ "mse,all": 0.415110294117647,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.5493741307371349,
15
+ "acc,exam_id__UNICAMP_2022": 0.6153846153846154,
16
+ "acc,exam_id__UNICAMP_2024": 0.5555555555555556,
17
+ "acc,exam_id__USP_2024": 0.6341463414634146,
18
+ "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764,
19
+ "acc,exam_id__USP_2021": 0.5769230769230769,
20
+ "acc,exam_id__UNICAMP_2020": 0.5272727272727272,
21
+ "acc,exam_id__USP_2019": 0.45,
22
+ "acc,exam_id__USP_2020": 0.5178571428571429,
23
+ "acc,exam_id__UNICAMP_2019": 0.54,
24
+ "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174,
25
+ "acc,exam_id__UNICAMP_2023": 0.5581395348837209,
26
+ "acc,exam_id__USP_2018": 0.5370370370370371,
27
+ "acc,exam_id__USP_2022": 0.6122448979591837,
28
+ "acc,exam_id__USP_2023": 0.5909090909090909,
29
+ "acc,exam_id__UNICAMP_2018": 0.5925925925925926,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.6480055983205039,
35
+ "acc,exam_id__2011": 0.7008547008547008,
36
+ "acc,exam_id__2009": 0.6521739130434783,
37
+ "acc,exam_id__2012": 0.646551724137931,
38
+ "acc,exam_id__2016": 0.5950413223140496,
39
+ "acc,exam_id__2017": 0.6551724137931034,
40
+ "acc,exam_id__2022": 0.6090225563909775,
41
+ "acc,exam_id__2023": 0.6666666666666666,
42
+ "acc,exam_id__2013": 0.6759259259259259,
43
+ "acc,exam_id__2015": 0.6302521008403361,
44
+ "acc,exam_id__2010": 0.6495726495726496,
45
+ "acc,exam_id__2016_2": 0.6422764227642277,
46
+ "acc,exam_id__2014": 0.6605504587155964
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.787259111855886,
50
+ "acc,all": 0.8461538461538461,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.8223021238433512,
56
+ "acc,all": 0.8257142857142857
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.4182232346241458,
60
+ "acc,exam_id__2017-23": 0.4125,
61
+ "acc,exam_id__2010-02": 0.42,
62
+ "acc,exam_id__2012-06": 0.4375,
63
+ "acc,exam_id__2016-20a": 0.4,
64
+ "acc,exam_id__2018-25": 0.4,
65
+ "acc,exam_id__2011-05": 0.475,
66
+ "acc,exam_id__2013-10": 0.3625,
67
+ "acc,exam_id__2015-16": 0.4375,
68
+ "acc,exam_id__2013-12": 0.4625,
69
+ "acc,exam_id__2016-21": 0.3875,
70
+ "acc,exam_id__2011-04": 0.325,
71
+ "acc,exam_id__2010-01": 0.35294117647058826,
72
+ "acc,exam_id__2012-07": 0.4,
73
+ "acc,exam_id__2012-08": 0.4125,
74
+ "acc,exam_id__2014-13": 0.325,
75
+ "acc,exam_id__2013-11": 0.4625,
76
+ "acc,exam_id__2015-18": 0.45,
77
+ "acc,exam_id__2016-19": 0.47435897435897434,
78
+ "acc,exam_id__2014-14": 0.5,
79
+ "acc,exam_id__2012-09": 0.4155844155844156,
80
+ "acc,exam_id__2012-06a": 0.4125,
81
+ "acc,exam_id__2017-22": 0.425,
82
+ "acc,exam_id__2014-15": 0.44871794871794873,
83
+ "acc,exam_id__2015-17": 0.5512820512820513,
84
+ "acc,exam_id__2017-24": 0.4625,
85
+ "acc,exam_id__2016-20": 0.375,
86
+ "acc,exam_id__2011-03": 0.3333333333333333,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.6973371097488426,
92
+ "acc,all": 0.7414806110458284
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.5875714442795096,
96
+ "acc,all": 0.6666666666666666,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
 
 
 
 
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7f116cc853a0>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
 
 
 
 
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7f116cc84d60>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f116cc84fe0>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7f116cc85580>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
 
 
 
 
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f116cc85800>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
 
 
 
 
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
 
 
 
 
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
 
 
 
 
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7f116cc84720>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7f116cc849a0>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
 
 
 
 
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
 
 
 
 
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia-temp/tweetsentbr",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "id_sampler",
990
+ "sampler_config": {
991
+ "id_list": [
992
+ "862006098672459776",
993
+ "861612241703063552",
994
+ "861833257087848448",
995
+ "861283345476571138",
996
+ "861283000335695873",
997
+ "862139461274152962",
998
+ "862139468702265344",
999
+ "862006107702734848",
1000
+ "862004354458537984",
1001
+ "861833322925883392",
1002
+ "861603063190171648",
1003
+ "862139462716989440",
1004
+ "862005877355810818",
1005
+ "861751885862244353",
1006
+ "862045180261695489",
1007
+ "862004252499226630",
1008
+ "862023970828292097",
1009
+ "862041752127107074",
1010
+ "862034961863503872",
1011
+ "861293756548608001",
1012
+ "861993527575695360",
1013
+ "862003099355021315",
1014
+ "862002404086206467",
1015
+ "861282989602463744",
1016
+ "862139454399668229",
1017
+ "862139463769743361",
1018
+ "862054906689138688",
1019
+ "862139446535360513",
1020
+ "861997363744911361",
1021
+ "862057988898648065",
1022
+ "861329080083521536",
1023
+ "861286289034838016",
1024
+ "861833050526806017",
1025
+ "861300658565255169",
1026
+ "861989003821813760",
1027
+ "861682750398631938",
1028
+ "861283275716907008",
1029
+ "861283402523267072",
1030
+ "861873108147466240",
1031
+ "862139462138171392",
1032
+ "861284090271715333",
1033
+ "862139446149427201",
1034
+ "861629109331525633",
1035
+ "861721698609098753",
1036
+ "862139453124612096",
1037
+ "861283339482914816",
1038
+ "861282466291748867",
1039
+ "862055346759749632",
1040
+ "862003019860389891",
1041
+ "862140698346344449",
1042
+ "862084376280092672",
1043
+ "862003058708017152",
1044
+ "862000677345787904",
1045
+ "862029129310502913",
1046
+ "862005822376882178",
1047
+ "861969836297134085",
1048
+ "861302955361927168",
1049
+ "862064949451005953",
1050
+ "861282589541355520",
1051
+ "862005476858486784",
1052
+ "862004684411850757",
1053
+ "862139471101349890",
1054
+ "862139467146170368",
1055
+ "862139475098558465",
1056
+ "862140706550403072",
1057
+ "861282777001537536",
1058
+ "862003184147079169",
1059
+ "861283410656059394",
1060
+ "861283417857691649",
1061
+ "861888778922856448",
1062
+ "861655860812099585",
1063
+ "861834248063504384",
1064
+ "862005210935382017",
1065
+ "861282716930760704",
1066
+ "861287082433622022"
1067
+ ],
1068
+ "id_column": "id"
1069
+ }
1070
+ },
1071
+ "num_fewshot": 25,
1072
+ "metric_list": [
1073
+ {
1074
+ "metric": "f1_macro",
1075
+ "aggregation": "f1_macro",
1076
+ "higher_is_better": true
1077
+ },
1078
+ {
1079
+ "metric": "acc",
1080
+ "aggregation": "acc",
1081
+ "higher_is_better": true
1082
+ }
1083
+ ],
1084
+ "output_type": "generate_until",
1085
+ "generation_kwargs": {
1086
+ "max_gen_toks": 32,
1087
+ "do_sample": false,
1088
+ "temperature": 0.0,
1089
+ "top_k": null,
1090
+ "top_p": null,
1091
+ "until": [
1092
+ "\n\n"
1093
+ ]
1094
  },
1095
+ "repeats": 1,
1096
+ "filter_list": [
1097
+ {
1098
+ "name": "all",
1099
+ "filter": [
1100
+ {
1101
+ "function": "find_similar_label",
1102
+ "labels": [
1103
+ "Positivo",
1104
+ "Neutro",
1105
+ "Negativo"
1106
+ ]
1107
+ },
1108
+ {
1109
+ "function": "take_first"
1110
+ }
1111
+ ]
1112
+ }
1113
+ ],
1114
+ "should_decontaminate": false,
1115
+ "metadata": {
1116
+ "version": 1.0
1117
  }
 
1118
  }
 
 
 
 
 
 
 
 
1119
  },
1120
+ "versions": {
1121
+ "assin2_rte": 1.1,
1122
+ "assin2_sts": 1.1,
1123
+ "bluex": 1.1,
1124
+ "enem_challenge": 1.1,
1125
+ "faquad_nli": 1.1,
1126
+ "hatebr_offensive": 1.0,
1127
+ "oab_exams": 1.5,
1128
+ "portuguese_hate_speech": 1.0,
1129
+ "tweetsentbr": 1.0
 
 
 
 
1130
  },
1131
+ "n-shot": {
1132
+ "assin2_rte": 15,
1133
+ "assin2_sts": 15,
1134
+ "bluex": 3,
1135
+ "enem_challenge": 3,
1136
+ "faquad_nli": 15,
1137
+ "hatebr_offensive": 25,
1138
+ "oab_exams": 3,
1139
+ "portuguese_hate_speech": 25,
1140
+ "tweetsentbr": 25
 
 
 
 
1141
  },
1142
+ "model_meta": {
1143
+ "truncated": 4,
1144
+ "non_truncated": 14146,
1145
+ "padded": 0,
1146
+ "non_padded": 14150,
1147
+ "fewshots_truncated": 4,
1148
+ "has_chat_template": true,
1149
+ "chat_type": "system_user_assistant",
1150
+ "n_gpus": 1,
1151
+ "accelerate_num_process": null,
1152
+ "model_sha": "cda1eebe5ba3912c900045ed7847600e29b22c64",
1153
+ "model_dtype": "torch.bfloat16",
1154
+ "model_memory_footprint": 14617722880,
1155
+ "model_num_parameters": 7241748480,
1156
+ "model_is_loaded_in_4bit": null,
1157
+ "model_is_loaded_in_8bit": null,
1158
+ "model_is_quantized": null,
1159
+ "model_device": "cuda:1",
1160
+ "batch_size": 16,
1161
+ "max_length": 2560,
1162
+ "max_ctx_length": 2528,
1163
+ "max_gen_toks": 32
1164
  },
1165
+ "task_model_meta": {
1166
+ "assin2_rte": {
1167
+ "sample_size": 2448,
1168
+ "truncated": 0,
1169
+ "non_truncated": 2448,
1170
+ "padded": 0,
1171
+ "non_padded": 2448,
1172
+ "fewshots_truncated": 0,
1173
+ "mean_seq_length": 1584.7455065359477,
1174
+ "min_seq_length": 1561,
1175
+ "max_seq_length": 1651,
1176
+ "max_ctx_length": 2528,
1177
+ "max_gen_toks": 32,
1178
+ "mean_original_fewshots_size": 15.0,
1179
+ "mean_effective_fewshot_size": 15.0
1180
+ },
1181
+ "assin2_sts": {
1182
+ "sample_size": 2448,
1183
+ "truncated": 0,
1184
+ "non_truncated": 2448,
1185
+ "padded": 0,
1186
+ "non_padded": 2448,
1187
+ "fewshots_truncated": 0,
1188
+ "mean_seq_length": 1824.7455065359477,
1189
+ "min_seq_length": 1801,
1190
+ "max_seq_length": 1891,
1191
+ "max_ctx_length": 2528,
1192
+ "max_gen_toks": 32,
1193
+ "mean_original_fewshots_size": 15.0,
1194
+ "mean_effective_fewshot_size": 15.0
1195
+ },
1196
+ "bluex": {
1197
+ "sample_size": 719,
1198
+ "truncated": 2,
1199
+ "non_truncated": 717,
1200
+ "padded": 0,
1201
+ "non_padded": 719,
1202
+ "fewshots_truncated": 2,
1203
+ "mean_seq_length": 1782.9262865090404,
1204
+ "min_seq_length": 1406,
1205
+ "max_seq_length": 2583,
1206
+ "max_ctx_length": 2528,
1207
+ "max_gen_toks": 32,
1208
+ "mean_original_fewshots_size": 3.0,
1209
+ "mean_effective_fewshot_size": 2.9972183588317107
1210
+ },
1211
+ "enem_challenge": {
1212
+ "sample_size": 1429,
1213
+ "truncated": 2,
1214
+ "non_truncated": 1427,
1215
+ "padded": 0,
1216
+ "non_padded": 1429,
1217
+ "fewshots_truncated": 2,
1218
+ "mean_seq_length": 1683.039188243527,
1219
+ "min_seq_length": 1417,
1220
+ "max_seq_length": 2681,
1221
+ "max_ctx_length": 2528,
1222
+ "max_gen_toks": 32,
1223
+ "mean_original_fewshots_size": 3.0,
1224
+ "mean_effective_fewshot_size": 2.998600419874038
1225
+ },
1226
+ "faquad_nli": {
1227
+ "sample_size": 650,
1228
+ "truncated": 0,
1229
+ "non_truncated": 650,
1230
+ "padded": 0,
1231
+ "non_padded": 650,
1232
+ "fewshots_truncated": 0,
1233
+ "mean_seq_length": 1825.9876923076922,
1234
+ "min_seq_length": 1770,
1235
+ "max_seq_length": 1946,
1236
+ "max_ctx_length": 2528,
1237
+ "max_gen_toks": 32,
1238
+ "mean_original_fewshots_size": 15.0,
1239
+ "mean_effective_fewshot_size": 15.0
1240
+ },
1241
+ "hatebr_offensive": {
1242
+ "sample_size": 1400,
1243
+ "truncated": 0,
1244
+ "non_truncated": 1400,
1245
+ "padded": 0,
1246
+ "non_padded": 1400,
1247
+ "fewshots_truncated": 0,
1248
+ "mean_seq_length": 1676.3878571428572,
1249
+ "min_seq_length": 1653,
1250
+ "max_seq_length": 1927,
1251
+ "max_ctx_length": 2528,
1252
+ "max_gen_toks": 32,
1253
+ "mean_original_fewshots_size": 25.0,
1254
+ "mean_effective_fewshot_size": 25.0
1255
+ },
1256
+ "oab_exams": {
1257
+ "sample_size": 2195,
1258
+ "truncated": 0,
1259
+ "non_truncated": 2195,
1260
+ "padded": 0,
1261
+ "non_padded": 2195,
1262
+ "fewshots_truncated": 0,
1263
+ "mean_seq_length": 1428.764464692483,
1264
+ "min_seq_length": 1162,
1265
+ "max_seq_length": 1931,
1266
+ "max_ctx_length": 2528,
1267
+ "max_gen_toks": 32,
1268
+ "mean_original_fewshots_size": 3.0,
1269
+ "mean_effective_fewshot_size": 3.0
1270
+ },
1271
+ "portuguese_hate_speech": {
1272
+ "sample_size": 851,
1273
+ "truncated": 0,
1274
+ "non_truncated": 851,
1275
+ "padded": 0,
1276
+ "non_padded": 851,
1277
+ "fewshots_truncated": 0,
1278
+ "mean_seq_length": 2177.3360752056406,
1279
+ "min_seq_length": 2142,
1280
+ "max_seq_length": 2216,
1281
+ "max_ctx_length": 2528,
1282
+ "max_gen_toks": 32,
1283
+ "mean_original_fewshots_size": 25.0,
1284
+ "mean_effective_fewshot_size": 25.0
1285
+ },
1286
+ "tweetsentbr": {
1287
+ "sample_size": 2010,
1288
+ "truncated": 0,
1289
+ "non_truncated": 2010,
1290
+ "padded": 0,
1291
+ "non_padded": 2010,
1292
+ "fewshots_truncated": 0,
1293
+ "mean_seq_length": 1923.2492537313433,
1294
+ "min_seq_length": 1902,
1295
+ "max_seq_length": 2018,
1296
+ "max_ctx_length": 2528,
1297
+ "max_gen_toks": 32,
1298
+ "mean_original_fewshots_size": 25.0,
1299
+ "mean_effective_fewshot_size": 25.0
1300
+ }
1301
  },
1302
+ "config": {
1303
+ "model": "huggingface",
1304
+ "model_args": "pretrained=FuseAI/FuseChat-7B-VaRM,dtype=bfloat16,device=cuda:1,revision=main,trust_remote_code=True,starting_max_length=2560",
1305
+ "batch_size": "auto",
1306
+ "batch_sizes": [],
1307
+ "device": null,
1308
+ "use_cache": null,
1309
+ "limit": [
1310
+ null,
1311
+ null,
1312
+ null,
1313
+ null,
1314
+ null,
1315
+ null,
1316
+ null,
1317
+ null,
1318
+ null
1319
+ ],
1320
+ "bootstrap_iters": 0,
1321
+ "gen_kwargs": null
1322
  },
1323
+ "git_hash": null
 
 
 
 
 
 
1324
  }
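Note on the multiple-choice tasks configured above (bluex, enem_challenge, oab_exams): the answer letter is recovered from the free-form generation by a filter chain of normalize_spaces, remove_accents, find_choices with the listed regex_patterns, and take_first. The following is a rough, hypothetical Python sketch of how such a cascade could behave; it is not the harness's actual implementation, the helper name extract_choice is invented for illustration, and the accent-stripping step is only an assumption about what remove_accents does.

import re
import unicodedata

# Regex cascade copied from the "find_choices" filter configs above (A-E variant).
PATTERNS = [
    r"(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\b",
    r"\b([ABCDE])\.",
    r"\b([ABCDE]) ?[.):-]",
    r"\b([ABCDE])$",
    r"\b([ABCDE])\b",
]

def extract_choice(generation):
    # "normalize_spaces": collapse runs of whitespace.
    text = " ".join(generation.split())
    # "remove_accents" (assumed behavior): drop combining marks after NFKD normalization.
    text = "".join(c for c in unicodedata.normalize("NFKD", text)
                   if not unicodedata.combining(c))
    # "find_choices" + "take_first": return the first capture of the first pattern that matches.
    for pattern in PATTERNS:
        match = re.search(pattern, text)
        if match:
            return match.group(1)
    return None

print(extract_choice("A alternativa correta e a letra C."))  # prints "C"

The patterns are tried from most specific (an explicit "Letra/Alternativa/Resposta X" phrasing) to most permissive (any standalone letter), so verbose answers are still scored when they contain a recognizable choice.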
FuseAI/FuseChat-7B-VaRM/results_2024-03-08T15-26-39.517660.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.6749035343197202,
38
- "all_grouped_npm": 0.5201529644708365,
39
  "all_grouped": {
40
  "enem_challenge": 0.6480055983205039,
41
  "bluex": 0.5493741307371349,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.787259111855886,
46
  "hatebr_offensive": 0.8223021238433512,
47
  "portuguese_hate_speech": 0.6973371097488426,
48
- "tweetsentbr": 0.44067858320963216
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6480055983205039,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.787259111855886,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8223021238433512,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6973371097488426,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.44067858320963216
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6480055983205039,
@@ -150,9 +150,9 @@
150
  "main_score": 0.6973371097488426
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.44067858320963216,
154
  "acc,all": 0.6666666666666666,
155
- "main_score": 0.44067858320963216
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.6912249633274844,
38
+ "all_grouped_npm": 0.5444408052561999,
39
  "all_grouped": {
40
  "enem_challenge": 0.6480055983205039,
41
  "bluex": 0.5493741307371349,
 
45
  "faquad_nli": 0.787259111855886,
46
  "hatebr_offensive": 0.8223021238433512,
47
  "portuguese_hate_speech": 0.6973371097488426,
48
+ "tweetsentbr": 0.5875714442795096
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6480055983205039,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.787259111855886,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8223021238433512,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6973371097488426,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.5875714442795096
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6480055983205039,
 
150
  "main_score": 0.6973371097488426
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.5875714442795096,
154
  "acc,all": 0.6666666666666666,
155
+ "main_score": 0.5875714442795096
156
  }
157
  },
158
  "config_tasks": {
GritLM/GritLM-7B-KTO/raw_2024-06-15T00-03-38.180499/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.9203424859265217,
5
- "acc,all": 0.9203431372549019,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.7712035054624826,
10
- "mse,all": 0.5456045751633987,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.5104311543810849,
15
- "acc,exam_id__UNICAMP_2023": 0.4883720930232558,
16
- "acc,exam_id__USP_2019": 0.4,
17
- "acc,exam_id__USP_2018": 0.5,
18
- "acc,exam_id__USP_2020": 0.5714285714285714,
19
- "acc,exam_id__UNICAMP_2019": 0.5,
20
- "acc,exam_id__UNICAMP_2022": 0.5641025641025641,
21
- "acc,exam_id__UNICAMP_2018": 0.4074074074074074,
22
- "acc,exam_id__UNICAMP_2020": 0.509090909090909,
23
- "acc,exam_id__USP_2021": 0.5,
24
- "acc,exam_id__USP_2023": 0.6818181818181818,
25
- "acc,exam_id__UNICAMP_2024": 0.5111111111111111,
26
- "acc,exam_id__UNICAMP_2021_1": 0.5,
27
- "acc,exam_id__USP_2022": 0.5714285714285714,
28
- "acc,exam_id__UNICAMP_2021_2": 0.35294117647058826,
29
- "acc,exam_id__USP_2024": 0.6341463414634146,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.5808257522743177,
35
- "acc,exam_id__2017": 0.603448275862069,
36
- "acc,exam_id__2014": 0.5871559633027523,
37
- "acc,exam_id__2010": 0.5982905982905983,
38
- "acc,exam_id__2011": 0.6239316239316239,
39
- "acc,exam_id__2015": 0.5966386554621849,
40
- "acc,exam_id__2013": 0.5462962962962963,
41
- "acc,exam_id__2023": 0.5925925925925926,
42
- "acc,exam_id__2022": 0.6165413533834586,
43
- "acc,exam_id__2016_2": 0.5691056910569106,
44
- "acc,exam_id__2012": 0.5517241379310345,
45
- "acc,exam_id__2016": 0.5537190082644629,
46
- "acc,exam_id__2009": 0.5217391304347826
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.7270998493492381,
50
- "acc,all": 0.7646153846153846,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.7778660429262838,
56
- "acc,all": 0.7842857142857143
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.4031890660592255,
60
- "acc,exam_id__2015-17": 0.5,
61
- "acc,exam_id__2015-18": 0.425,
62
- "acc,exam_id__2014-14": 0.4875,
63
- "acc,exam_id__2012-09": 0.37662337662337664,
64
- "acc,exam_id__2016-20a": 0.3375,
65
- "acc,exam_id__2012-06": 0.425,
66
- "acc,exam_id__2017-23": 0.375,
67
- "acc,exam_id__2010-01": 0.4,
68
- "acc,exam_id__2011-04": 0.3125,
69
- "acc,exam_id__2011-05": 0.4375,
70
- "acc,exam_id__2014-13": 0.3125,
71
- "acc,exam_id__2016-20": 0.35,
72
- "acc,exam_id__2013-10": 0.275,
73
- "acc,exam_id__2016-19": 0.46153846153846156,
74
- "acc,exam_id__2016-21": 0.3375,
75
- "acc,exam_id__2012-06a": 0.4,
76
- "acc,exam_id__2012-07": 0.4375,
77
- "acc,exam_id__2015-16": 0.4,
78
- "acc,exam_id__2013-11": 0.4125,
79
- "acc,exam_id__2018-25": 0.475,
80
- "acc,exam_id__2017-24": 0.3875,
81
- "acc,exam_id__2010-02": 0.44,
82
- "acc,exam_id__2012-08": 0.4125,
83
- "acc,exam_id__2014-15": 0.47435897435897434,
84
- "acc,exam_id__2011-03": 0.3434343434343434,
85
- "acc,exam_id__2013-12": 0.4875,
86
- "acc,exam_id__2017-22": 0.4125,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.71169854421558,
92
- "acc,all": 0.7532314923619271
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.4509015535405497,
96
- "acc,all": 0.673134328358209,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
 
 
 
 
 
 
 
 
 
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
 
 
 
 
 
 
 
 
 
208
  },
209
- {
210
- "function": "take_first"
 
 
 
 
 
 
 
 
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7feb2c64b560>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
 
 
 
 
 
 
 
 
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7feb2c64b600>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
 
 
 
 
 
 
 
 
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
 
 
414
  },
415
- {
416
- "function": "take_first"
 
 
 
 
 
 
 
 
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7feb2c64b880>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7feb2c64a7a0>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
 
 
503
  },
504
- {
505
- "function": "take_first"
 
 
 
 
 
 
 
 
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7feb2c64b380>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
 
 
 
 
 
 
 
 
 
624
  },
625
- {
626
- "function": "take_first"
 
 
 
 
 
 
 
 
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
 
 
 
 
 
 
 
 
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7feb2c64aca0>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
 
 
 
 
 
 
 
 
 
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
 
 
 
839
  },
840
- {
841
- "function": "take_first"
 
 
 
 
 
 
 
 
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7feb2c64af20>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
 
 
 
 
 
 
 
 
 
963
  },
964
- {
965
- "function": "take_first"
 
 
 
 
 
 
 
 
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
  },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
- },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
  },
1028
- {
1029
- "function": "take_first"
 
 
 
 
 
 
 
 
1030
  }
1031
- ]
1032
  }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
- }
1038
- }
1039
- },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
1050
- },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
1061
- },
1062
- "model_meta": {
1063
- "truncated": 1,
1064
- "non_truncated": 14149,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 1,
1068
- "has_chat_template": true,
1069
- "chat_type": "system_user_assistant",
1070
- "n_gpus": 1,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "b5c48669508c1de18c698460c187f64e90e7df44",
1073
- "model_dtype": "torch.bfloat16",
1074
- "model_memory_footprint": 15020343296,
1075
- "model_num_parameters": 7241732096,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 8,
1081
- "max_length": 2560,
1082
- "max_ctx_length": 2528,
1083
- "max_gen_toks": 32
1084
- },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1554.7455065359477,
1094
- "min_seq_length": 1531,
1095
- "max_seq_length": 1621,
1096
- "max_ctx_length": 2528,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1754.7455065359477,
1109
- "min_seq_length": 1731,
1110
- "max_seq_length": 1821,
1111
- "max_ctx_length": 2528,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
  },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 0,
1119
- "non_truncated": 719,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 0,
1123
- "mean_seq_length": 1705.9262865090404,
1124
- "min_seq_length": 1329,
1125
- "max_seq_length": 2506,
1126
- "max_ctx_length": 2528,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 3.0
1130
  },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 1,
1134
- "non_truncated": 1428,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 1,
1138
- "mean_seq_length": 1606.039188243527,
1139
- "min_seq_length": 1340,
1140
- "max_seq_length": 2604,
1141
- "max_ctx_length": 2528,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 2.9993002099370187
1145
  },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1765.9876923076922,
1154
- "min_seq_length": 1710,
1155
- "max_seq_length": 1886,
1156
- "max_ctx_length": 2528,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
 
 
 
 
 
 
 
 
1160
  },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1633.3878571428572,
1169
- "min_seq_length": 1610,
1170
- "max_seq_length": 1884,
1171
- "max_ctx_length": 2528,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 0,
1179
- "non_truncated": 2195,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 0,
1183
- "mean_seq_length": 1350.764464692483,
1184
- "min_seq_length": 1084,
1185
- "max_seq_length": 1853,
1186
- "max_ctx_length": 2528,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 3.0
 
 
 
 
 
 
 
 
 
1190
  },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 0,
1194
- "non_truncated": 851,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 0,
1198
- "mean_seq_length": 2131.3360752056406,
1199
- "min_seq_length": 2096,
1200
- "max_seq_length": 2170,
1201
- "max_ctx_length": 2528,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 25.0
 
 
 
 
 
 
1205
  },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1877.2492537313433,
1214
- "min_seq_length": 1856,
1215
- "max_seq_length": 1972,
1216
- "max_ctx_length": 2528,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
- },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=GritLM/GritLM-7B-KTO,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
- },
1243
- "git_hash": "2d67fba"
1244
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.9203424859265217,
5
+ "acc,all": 0.9203431372549019,
6
+ "alias": "assin2_rte"
 
 
 
 
 
 
 
 
 
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.7712035054624826,
10
+ "mse,all": 0.5456045751633987,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.5104311543810849,
15
+ "acc,exam_id__UNICAMP_2023": 0.4883720930232558,
16
+ "acc,exam_id__USP_2019": 0.4,
17
+ "acc,exam_id__USP_2018": 0.5,
18
+ "acc,exam_id__USP_2020": 0.5714285714285714,
19
+ "acc,exam_id__UNICAMP_2019": 0.5,
20
+ "acc,exam_id__UNICAMP_2022": 0.5641025641025641,
21
+ "acc,exam_id__UNICAMP_2018": 0.4074074074074074,
22
+ "acc,exam_id__UNICAMP_2020": 0.509090909090909,
23
+ "acc,exam_id__USP_2021": 0.5,
24
+ "acc,exam_id__USP_2023": 0.6818181818181818,
25
+ "acc,exam_id__UNICAMP_2024": 0.5111111111111111,
26
+ "acc,exam_id__UNICAMP_2021_1": 0.5,
27
+ "acc,exam_id__USP_2022": 0.5714285714285714,
28
+ "acc,exam_id__UNICAMP_2021_2": 0.35294117647058826,
29
+ "acc,exam_id__USP_2024": 0.6341463414634146,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.5808257522743177,
35
+ "acc,exam_id__2017": 0.603448275862069,
36
+ "acc,exam_id__2014": 0.5871559633027523,
37
+ "acc,exam_id__2010": 0.5982905982905983,
38
+ "acc,exam_id__2011": 0.6239316239316239,
39
+ "acc,exam_id__2015": 0.5966386554621849,
40
+ "acc,exam_id__2013": 0.5462962962962963,
41
+ "acc,exam_id__2023": 0.5925925925925926,
42
+ "acc,exam_id__2022": 0.6165413533834586,
43
+ "acc,exam_id__2016_2": 0.5691056910569106,
44
+ "acc,exam_id__2012": 0.5517241379310345,
45
+ "acc,exam_id__2016": 0.5537190082644629,
46
+ "acc,exam_id__2009": 0.5217391304347826
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.7270998493492381,
50
+ "acc,all": 0.7646153846153846,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.7778660429262838,
56
+ "acc,all": 0.7842857142857143
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.4031890660592255,
60
+ "acc,exam_id__2015-17": 0.5,
61
+ "acc,exam_id__2015-18": 0.425,
62
+ "acc,exam_id__2014-14": 0.4875,
63
+ "acc,exam_id__2012-09": 0.37662337662337664,
64
+ "acc,exam_id__2016-20a": 0.3375,
65
+ "acc,exam_id__2012-06": 0.425,
66
+ "acc,exam_id__2017-23": 0.375,
67
+ "acc,exam_id__2010-01": 0.4,
68
+ "acc,exam_id__2011-04": 0.3125,
69
+ "acc,exam_id__2011-05": 0.4375,
70
+ "acc,exam_id__2014-13": 0.3125,
71
+ "acc,exam_id__2016-20": 0.35,
72
+ "acc,exam_id__2013-10": 0.275,
73
+ "acc,exam_id__2016-19": 0.46153846153846156,
74
+ "acc,exam_id__2016-21": 0.3375,
75
+ "acc,exam_id__2012-06a": 0.4,
76
+ "acc,exam_id__2012-07": 0.4375,
77
+ "acc,exam_id__2015-16": 0.4,
78
+ "acc,exam_id__2013-11": 0.4125,
79
+ "acc,exam_id__2018-25": 0.475,
80
+ "acc,exam_id__2017-24": 0.3875,
81
+ "acc,exam_id__2010-02": 0.44,
82
+ "acc,exam_id__2012-08": 0.4125,
83
+ "acc,exam_id__2014-15": 0.47435897435897434,
84
+ "acc,exam_id__2011-03": 0.3434343434343434,
85
+ "acc,exam_id__2013-12": 0.4875,
86
+ "acc,exam_id__2017-22": 0.4125,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.71169854421558,
92
+ "acc,all": 0.7532314923619271
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.6012020713873997,
96
+ "acc,all": 0.673134328358209,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
 
 
 
 
 
 
 
 
 
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7feb2c64b560>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
 
 
 
 
 
 
 
 
 
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7feb2c64b600>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7feb2c64b880>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7feb2c64a7a0>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
 
 
 
 
 
 
 
 
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7feb2c64b380>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7feb2c64aca0>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7feb2c64af20>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
990
  },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
+ },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
+ },
1028
+ {
1029
+ "function": "take_first"
1030
+ }
1031
+ ]
1032
+ }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
  }
 
1038
  }
1039
  },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
1050
  },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
1061
  },
1062
+ "model_meta": {
1063
+ "truncated": 1,
1064
+ "non_truncated": 14149,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 1,
1068
+ "has_chat_template": true,
1069
+ "chat_type": "system_user_assistant",
1070
+ "n_gpus": 1,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "b5c48669508c1de18c698460c187f64e90e7df44",
1073
+ "model_dtype": "torch.bfloat16",
1074
+ "model_memory_footprint": 15020343296,
1075
+ "model_num_parameters": 7241732096,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 8,
1081
+ "max_length": 2560,
1082
+ "max_ctx_length": 2528,
1083
+ "max_gen_toks": 32
1084
  },
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1554.7455065359477,
1094
+ "min_seq_length": 1531,
1095
+ "max_seq_length": 1621,
1096
+ "max_ctx_length": 2528,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
+ },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1754.7455065359477,
1109
+ "min_seq_length": 1731,
1110
+ "max_seq_length": 1821,
1111
+ "max_ctx_length": 2528,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
+ },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 0,
1119
+ "non_truncated": 719,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 0,
1123
+ "mean_seq_length": 1705.9262865090404,
1124
+ "min_seq_length": 1329,
1125
+ "max_seq_length": 2506,
1126
+ "max_ctx_length": 2528,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 3.0
1130
+ },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 1,
1134
+ "non_truncated": 1428,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 1,
1138
+ "mean_seq_length": 1606.039188243527,
1139
+ "min_seq_length": 1340,
1140
+ "max_seq_length": 2604,
1141
+ "max_ctx_length": 2528,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 2.9993002099370187
1145
+ },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1765.9876923076922,
1154
+ "min_seq_length": 1710,
1155
+ "max_seq_length": 1886,
1156
+ "max_ctx_length": 2528,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1633.3878571428572,
1169
+ "min_seq_length": 1610,
1170
+ "max_seq_length": 1884,
1171
+ "max_ctx_length": 2528,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 0,
1179
+ "non_truncated": 2195,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 0,
1183
+ "mean_seq_length": 1350.764464692483,
1184
+ "min_seq_length": 1084,
1185
+ "max_seq_length": 1853,
1186
+ "max_ctx_length": 2528,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 3.0
1190
+ },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 0,
1194
+ "non_truncated": 851,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 0,
1198
+ "mean_seq_length": 2131.3360752056406,
1199
+ "min_seq_length": 2096,
1200
+ "max_seq_length": 2170,
1201
+ "max_ctx_length": 2528,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 25.0
1205
+ },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1877.2492537313433,
1214
+ "min_seq_length": 1856,
1215
+ "max_seq_length": 1972,
1216
+ "max_ctx_length": 2528,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
  },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=GritLM/GritLM-7B-KTO,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
  },
1243
+ "git_hash": "2d67fba"
1244
  }
GritLM/GritLM-7B-KTO/results_2024-06-15T00-03-38.180499.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.6503953282372537,
38
- "all_grouped_npm": 0.48267510153705995,
39
  "all_grouped": {
40
  "enem_challenge": 0.5808257522743177,
41
  "bluex": 0.5104311543810849,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.7270998493492381,
46
  "hatebr_offensive": 0.7778660429262838,
47
  "portuguese_hate_speech": 0.71169854421558,
48
- "tweetsentbr": 0.4509015535405497
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.5808257522743177,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7270998493492381,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7778660429262838,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.71169854421558,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.4509015535405497
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.5808257522743177,
@@ -150,9 +150,9 @@
150
  "main_score": 0.71169854421558
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.4509015535405497,
154
  "acc,all": 0.673134328358209,
155
- "main_score": 0.4509015535405497
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.6670953857757926,
38
+ "all_grouped_npm": 0.5075263776360761,
39
  "all_grouped": {
40
  "enem_challenge": 0.5808257522743177,
41
  "bluex": 0.5104311543810849,
 
45
  "faquad_nli": 0.7270998493492381,
46
  "hatebr_offensive": 0.7778660429262838,
47
  "portuguese_hate_speech": 0.71169854421558,
48
+ "tweetsentbr": 0.6012020713873997
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.5808257522743177,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7270998493492381,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.7778660429262838,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.71169854421558,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.6012020713873997
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.5808257522743177,
 
150
  "main_score": 0.71169854421558
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.6012020713873997,
154
  "acc,all": 0.673134328358209,
155
+ "main_score": 0.6012020713873997
156
  }
157
  },
158
  "config_tasks": {
GritLM/GritLM-7B/raw_2024-06-12T20-31-09.833902/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.9128900575777901,
5
- "acc,all": 0.9129901960784313,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.7210345169515866,
10
- "mse,all": 0.6111764705882352,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.49235048678720444,
15
- "acc,exam_id__USP_2023": 0.6590909090909091,
16
- "acc,exam_id__USP_2021": 0.46153846153846156,
17
- "acc,exam_id__USP_2024": 0.6341463414634146,
18
- "acc,exam_id__USP_2020": 0.48214285714285715,
19
- "acc,exam_id__UNICAMP_2024": 0.5333333333333333,
20
- "acc,exam_id__USP_2022": 0.3469387755102041,
21
- "acc,exam_id__UNICAMP_2019": 0.46,
22
- "acc,exam_id__UNICAMP_2023": 0.5581395348837209,
23
- "acc,exam_id__USP_2018": 0.48148148148148145,
24
- "acc,exam_id__UNICAMP_2021_2": 0.39215686274509803,
25
- "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174,
26
- "acc,exam_id__UNICAMP_2018": 0.42592592592592593,
27
- "acc,exam_id__UNICAMP_2020": 0.4909090909090909,
28
- "acc,exam_id__USP_2019": 0.5,
29
- "acc,exam_id__UNICAMP_2022": 0.5641025641025641,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.6116165150454863,
35
- "acc,exam_id__2014": 0.5596330275229358,
36
- "acc,exam_id__2009": 0.6347826086956522,
37
- "acc,exam_id__2017": 0.6293103448275862,
38
- "acc,exam_id__2016": 0.5619834710743802,
39
- "acc,exam_id__2012": 0.5603448275862069,
40
- "acc,exam_id__2013": 0.6203703703703703,
41
- "acc,exam_id__2015": 0.5798319327731093,
42
- "acc,exam_id__2023": 0.6444444444444445,
43
- "acc,exam_id__2022": 0.6541353383458647,
44
- "acc,exam_id__2010": 0.5811965811965812,
45
- "acc,exam_id__2016_2": 0.6178861788617886,
46
- "acc,exam_id__2011": 0.6837606837606838
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.8085014118262378,
50
- "acc,all": 0.8630769230769231,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.8066603535353536,
56
- "acc,all": 0.81
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.40501138952164006,
60
- "acc,exam_id__2013-11": 0.375,
61
- "acc,exam_id__2011-05": 0.3875,
62
- "acc,exam_id__2016-20a": 0.325,
63
- "acc,exam_id__2018-25": 0.4,
64
- "acc,exam_id__2014-14": 0.4875,
65
- "acc,exam_id__2014-13": 0.375,
66
- "acc,exam_id__2017-23": 0.3875,
67
- "acc,exam_id__2010-02": 0.51,
68
- "acc,exam_id__2015-17": 0.5384615384615384,
69
- "acc,exam_id__2012-06a": 0.4125,
70
- "acc,exam_id__2012-09": 0.37662337662337664,
71
- "acc,exam_id__2016-20": 0.4,
72
- "acc,exam_id__2012-07": 0.3875,
73
- "acc,exam_id__2012-08": 0.4125,
74
- "acc,exam_id__2013-10": 0.3125,
75
- "acc,exam_id__2015-16": 0.3875,
76
- "acc,exam_id__2017-22": 0.3875,
77
- "acc,exam_id__2012-06": 0.4375,
78
- "acc,exam_id__2011-03": 0.35353535353535354,
79
- "acc,exam_id__2017-24": 0.425,
80
- "acc,exam_id__2014-15": 0.44871794871794873,
81
- "acc,exam_id__2015-18": 0.375,
82
- "acc,exam_id__2016-19": 0.48717948717948717,
83
- "acc,exam_id__2013-12": 0.4625,
84
- "acc,exam_id__2011-04": 0.325,
85
- "acc,exam_id__2016-21": 0.3875,
86
- "acc,exam_id__2010-01": 0.36470588235294116,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.714677578568343,
92
- "acc,all": 0.7473560517038778
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.4531216849342852,
96
- "acc,all": 0.6686567164179105,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
208
  },
209
- {
210
- "function": "take_first"
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7f6ec25c0540>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7f6ec2577ba0>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
414
  },
415
- {
416
- "function": "take_first"
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f6ec2577e20>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7f6ec2577600>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
503
  },
504
- {
505
- "function": "take_first"
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f6ec2577920>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
624
  },
625
- {
626
- "function": "take_first"
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7f6ec2577240>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
839
  },
840
- {
841
- "function": "take_first"
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7f6ec25774c0>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
963
  },
964
- {
965
- "function": "take_first"
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
  },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
- },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
  },
1028
- {
1029
- "function": "take_first"
1030
  }
1031
- ]
1032
  }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
- }
1038
- }
1039
- },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
1050
- },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
1061
- },
1062
- "model_meta": {
1063
- "truncated": 1,
1064
- "non_truncated": 14149,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 1,
1068
- "has_chat_template": true,
1069
- "chat_type": "system_user_assistant",
1070
- "n_gpus": 1,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "13f00a0e36500c80ce12870ea513846a066004af",
1073
- "model_dtype": "torch.bfloat16",
1074
- "model_memory_footprint": 15020343296,
1075
- "model_num_parameters": 7241732096,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 8,
1081
- "max_length": 2560,
1082
- "max_ctx_length": 2528,
1083
- "max_gen_toks": 32
1084
- },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1554.7455065359477,
1094
- "min_seq_length": 1531,
1095
- "max_seq_length": 1621,
1096
- "max_ctx_length": 2528,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1754.7455065359477,
1109
- "min_seq_length": 1731,
1110
- "max_seq_length": 1821,
1111
- "max_ctx_length": 2528,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
  },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 0,
1119
- "non_truncated": 719,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 0,
1123
- "mean_seq_length": 1705.9262865090404,
1124
- "min_seq_length": 1329,
1125
- "max_seq_length": 2506,
1126
- "max_ctx_length": 2528,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 3.0
1130
  },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 1,
1134
- "non_truncated": 1428,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 1,
1138
- "mean_seq_length": 1606.039188243527,
1139
- "min_seq_length": 1340,
1140
- "max_seq_length": 2604,
1141
- "max_ctx_length": 2528,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 2.9993002099370187
1145
  },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1765.9876923076922,
1154
- "min_seq_length": 1710,
1155
- "max_seq_length": 1886,
1156
- "max_ctx_length": 2528,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
1160
  },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1633.3878571428572,
1169
- "min_seq_length": 1610,
1170
- "max_seq_length": 1884,
1171
- "max_ctx_length": 2528,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 0,
1179
- "non_truncated": 2195,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 0,
1183
- "mean_seq_length": 1350.764464692483,
1184
- "min_seq_length": 1084,
1185
- "max_seq_length": 1853,
1186
- "max_ctx_length": 2528,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 3.0
1190
  },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 0,
1194
- "non_truncated": 851,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 0,
1198
- "mean_seq_length": 2131.3360752056406,
1199
- "min_seq_length": 2096,
1200
- "max_seq_length": 2170,
1201
- "max_ctx_length": 2528,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 25.0
1205
  },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1877.2492537313433,
1214
- "min_seq_length": 1856,
1215
- "max_seq_length": 1972,
1216
- "max_ctx_length": 2528,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
- },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=GritLM/GritLM-7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
- },
1243
- "git_hash": "f2a0116"
1244
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.9128900575777901,
5
+ "acc,all": 0.9129901960784313,
6
+ "alias": "assin2_rte"
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.7210345169515866,
10
+ "mse,all": 0.6111764705882352,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.49235048678720444,
15
+ "acc,exam_id__USP_2023": 0.6590909090909091,
16
+ "acc,exam_id__USP_2021": 0.46153846153846156,
17
+ "acc,exam_id__USP_2024": 0.6341463414634146,
18
+ "acc,exam_id__USP_2020": 0.48214285714285715,
19
+ "acc,exam_id__UNICAMP_2024": 0.5333333333333333,
20
+ "acc,exam_id__USP_2022": 0.3469387755102041,
21
+ "acc,exam_id__UNICAMP_2019": 0.46,
22
+ "acc,exam_id__UNICAMP_2023": 0.5581395348837209,
23
+ "acc,exam_id__USP_2018": 0.48148148148148145,
24
+ "acc,exam_id__UNICAMP_2021_2": 0.39215686274509803,
25
+ "acc,exam_id__UNICAMP_2021_1": 0.4782608695652174,
26
+ "acc,exam_id__UNICAMP_2018": 0.42592592592592593,
27
+ "acc,exam_id__UNICAMP_2020": 0.4909090909090909,
28
+ "acc,exam_id__USP_2019": 0.5,
29
+ "acc,exam_id__UNICAMP_2022": 0.5641025641025641,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.6116165150454863,
35
+ "acc,exam_id__2014": 0.5596330275229358,
36
+ "acc,exam_id__2009": 0.6347826086956522,
37
+ "acc,exam_id__2017": 0.6293103448275862,
38
+ "acc,exam_id__2016": 0.5619834710743802,
39
+ "acc,exam_id__2012": 0.5603448275862069,
40
+ "acc,exam_id__2013": 0.6203703703703703,
41
+ "acc,exam_id__2015": 0.5798319327731093,
42
+ "acc,exam_id__2023": 0.6444444444444445,
43
+ "acc,exam_id__2022": 0.6541353383458647,
44
+ "acc,exam_id__2010": 0.5811965811965812,
45
+ "acc,exam_id__2016_2": 0.6178861788617886,
46
+ "acc,exam_id__2011": 0.6837606837606838
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.8085014118262378,
50
+ "acc,all": 0.8630769230769231,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.8066603535353536,
56
+ "acc,all": 0.81
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.40501138952164006,
60
+ "acc,exam_id__2013-11": 0.375,
61
+ "acc,exam_id__2011-05": 0.3875,
62
+ "acc,exam_id__2016-20a": 0.325,
63
+ "acc,exam_id__2018-25": 0.4,
64
+ "acc,exam_id__2014-14": 0.4875,
65
+ "acc,exam_id__2014-13": 0.375,
66
+ "acc,exam_id__2017-23": 0.3875,
67
+ "acc,exam_id__2010-02": 0.51,
68
+ "acc,exam_id__2015-17": 0.5384615384615384,
69
+ "acc,exam_id__2012-06a": 0.4125,
70
+ "acc,exam_id__2012-09": 0.37662337662337664,
71
+ "acc,exam_id__2016-20": 0.4,
72
+ "acc,exam_id__2012-07": 0.3875,
73
+ "acc,exam_id__2012-08": 0.4125,
74
+ "acc,exam_id__2013-10": 0.3125,
75
+ "acc,exam_id__2015-16": 0.3875,
76
+ "acc,exam_id__2017-22": 0.3875,
77
+ "acc,exam_id__2012-06": 0.4375,
78
+ "acc,exam_id__2011-03": 0.35353535353535354,
79
+ "acc,exam_id__2017-24": 0.425,
80
+ "acc,exam_id__2014-15": 0.44871794871794873,
81
+ "acc,exam_id__2015-18": 0.375,
82
+ "acc,exam_id__2016-19": 0.48717948717948717,
83
+ "acc,exam_id__2013-12": 0.4625,
84
+ "acc,exam_id__2011-04": 0.325,
85
+ "acc,exam_id__2016-21": 0.3875,
86
+ "acc,exam_id__2010-01": 0.36470588235294116,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.714677578568343,
92
+ "acc,all": 0.7473560517038778
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.6041622465790469,
96
+ "acc,all": 0.6686567164179105,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
 
 
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7f6ec25c0540>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
 
 
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7f6ec2577ba0>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f6ec2577e20>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7f6ec2577600>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
 
 
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f6ec2577920>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
 
 
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
 
 
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
 
 
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7f6ec2577240>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7f6ec25774c0>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
 
 
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
 
 
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
 
 
990
  },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
+ },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
+ },
1028
+ {
1029
+ "function": "take_first"
1030
+ }
1031
+ ]
1032
+ }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
  }
 
1038
  }
 
 
1039
  },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
 
 
 
 
1050
  },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
 
 
 
 
1061
  },
1062
+ "model_meta": {
1063
+ "truncated": 1,
1064
+ "non_truncated": 14149,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 1,
1068
+ "has_chat_template": true,
1069
+ "chat_type": "system_user_assistant",
1070
+ "n_gpus": 1,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "13f00a0e36500c80ce12870ea513846a066004af",
1073
+ "model_dtype": "torch.bfloat16",
1074
+ "model_memory_footprint": 15020343296,
1075
+ "model_num_parameters": 7241732096,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 8,
1081
+ "max_length": 2560,
1082
+ "max_ctx_length": 2528,
1083
+ "max_gen_toks": 32
1084
  },
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1554.7455065359477,
1094
+ "min_seq_length": 1531,
1095
+ "max_seq_length": 1621,
1096
+ "max_ctx_length": 2528,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
+ },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1754.7455065359477,
1109
+ "min_seq_length": 1731,
1110
+ "max_seq_length": 1821,
1111
+ "max_ctx_length": 2528,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
+ },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 0,
1119
+ "non_truncated": 719,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 0,
1123
+ "mean_seq_length": 1705.9262865090404,
1124
+ "min_seq_length": 1329,
1125
+ "max_seq_length": 2506,
1126
+ "max_ctx_length": 2528,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 3.0
1130
+ },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 1,
1134
+ "non_truncated": 1428,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 1,
1138
+ "mean_seq_length": 1606.039188243527,
1139
+ "min_seq_length": 1340,
1140
+ "max_seq_length": 2604,
1141
+ "max_ctx_length": 2528,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 2.9993002099370187
1145
+ },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1765.9876923076922,
1154
+ "min_seq_length": 1710,
1155
+ "max_seq_length": 1886,
1156
+ "max_ctx_length": 2528,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1633.3878571428572,
1169
+ "min_seq_length": 1610,
1170
+ "max_seq_length": 1884,
1171
+ "max_ctx_length": 2528,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 0,
1179
+ "non_truncated": 2195,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 0,
1183
+ "mean_seq_length": 1350.764464692483,
1184
+ "min_seq_length": 1084,
1185
+ "max_seq_length": 1853,
1186
+ "max_ctx_length": 2528,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 3.0
1190
+ },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 0,
1194
+ "non_truncated": 851,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 0,
1198
+ "mean_seq_length": 2131.3360752056406,
1199
+ "min_seq_length": 2096,
1200
+ "max_seq_length": 2170,
1201
+ "max_ctx_length": 2528,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 25.0
1205
+ },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1877.2492537313433,
1214
+ "min_seq_length": 1856,
1215
+ "max_seq_length": 1972,
1216
+ "max_ctx_length": 2528,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
  },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=GritLM/GritLM-7B,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
  },
1243
+ "git_hash": "f2a0116"
 
 
1244
  }
GritLM/GritLM-7B/results_2024-06-12T20-31-09.833902.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.6584293327497696,
38
- "all_grouped_npm": 0.5014262136194367,
39
  "all_grouped": {
40
  "enem_challenge": 0.6116165150454863,
41
  "bluex": 0.49235048678720444,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.8085014118262378,
46
  "hatebr_offensive": 0.8066603535353536,
47
  "portuguese_hate_speech": 0.714677578568343,
48
- "tweetsentbr": 0.4531216849342852
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6116165150454863,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.8085014118262378,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8066603535353536,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.714677578568343,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.4531216849342852
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6116165150454863,
@@ -150,9 +150,9 @@
150
  "main_score": 0.714677578568343
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.4531216849342852,
154
  "acc,all": 0.6686567164179105,
155
- "main_score": 0.4531216849342852
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.6752116173769653,
38
+ "all_grouped_npm": 0.5263998514575255,
39
  "all_grouped": {
40
  "enem_challenge": 0.6116165150454863,
41
  "bluex": 0.49235048678720444,
 
45
  "faquad_nli": 0.8085014118262378,
46
  "hatebr_offensive": 0.8066603535353536,
47
  "portuguese_hate_speech": 0.714677578568343,
48
+ "tweetsentbr": 0.6041622465790469
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6116165150454863,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.8085014118262378,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8066603535353536,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.714677578568343,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.6041622465790469
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6116165150454863,
 
150
  "main_score": 0.714677578568343
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.6041622465790469,
154
  "acc,all": 0.6686567164179105,
155
+ "main_score": 0.6041622465790469
156
  }
157
  },
158
  "config_tasks": {
HuggingFaceH4/zephyr-7b-beta/raw_2024-02-21T23-57-52.146406/results.json CHANGED
@@ -1,1324 +1,1324 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.8836486323653452,
5
- "acc,all": 0.8839869281045751,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.6678266192299295,
10
- "mse,all": 0.6669526143790849,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.47983310152990266,
15
- "acc,exam_id__USP_2021": 0.36538461538461536,
16
- "acc,exam_id__UNICAMP_2021_2": 0.37254901960784315,
17
- "acc,exam_id__UNICAMP_2023": 0.4418604651162791,
18
- "acc,exam_id__UNICAMP_2021_1": 0.45652173913043476,
19
- "acc,exam_id__USP_2024": 0.7073170731707317,
20
- "acc,exam_id__UNICAMP_2018": 0.42592592592592593,
21
- "acc,exam_id__USP_2022": 0.5510204081632653,
22
- "acc,exam_id__UNICAMP_2020": 0.4909090909090909,
23
- "acc,exam_id__USP_2018": 0.5,
24
- "acc,exam_id__USP_2020": 0.42857142857142855,
25
- "acc,exam_id__UNICAMP_2022": 0.46153846153846156,
26
- "acc,exam_id__USP_2019": 0.425,
27
- "acc,exam_id__USP_2023": 0.5909090909090909,
28
- "acc,exam_id__UNICAMP_2019": 0.56,
29
- "acc,exam_id__UNICAMP_2024": 0.4666666666666667,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.5787263820853744,
35
- "acc,exam_id__2016_2": 0.5284552845528455,
36
- "acc,exam_id__2009": 0.591304347826087,
37
- "acc,exam_id__2011": 0.6666666666666666,
38
- "acc,exam_id__2012": 0.6206896551724138,
39
- "acc,exam_id__2013": 0.5925925925925926,
40
- "acc,exam_id__2016": 0.5537190082644629,
41
- "acc,exam_id__2022": 0.5037593984962406,
42
- "acc,exam_id__2023": 0.5777777777777777,
43
- "acc,exam_id__2010": 0.5555555555555556,
44
- "acc,exam_id__2014": 0.5963302752293578,
45
- "acc,exam_id__2015": 0.6218487394957983,
46
- "acc,exam_id__2017": 0.5517241379310345
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.7017672651113582,
50
- "acc,all": 0.7384615384615385,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.8176778106453834,
56
- "acc,all": 0.82
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.3931662870159453,
60
- "acc,exam_id__2010-02": 0.46,
61
- "acc,exam_id__2016-19": 0.5,
62
- "acc,exam_id__2015-17": 0.4358974358974359,
63
- "acc,exam_id__2016-21": 0.4,
64
- "acc,exam_id__2017-24": 0.325,
65
- "acc,exam_id__2012-09": 0.37662337662337664,
66
- "acc,exam_id__2011-04": 0.3125,
67
- "acc,exam_id__2017-23": 0.4375,
68
- "acc,exam_id__2011-03": 0.41414141414141414,
69
- "acc,exam_id__2012-07": 0.3375,
70
- "acc,exam_id__2012-06": 0.375,
71
- "acc,exam_id__2014-13": 0.35,
72
- "acc,exam_id__2016-20a": 0.225,
73
- "acc,exam_id__2011-05": 0.3875,
74
- "acc,exam_id__2015-18": 0.425,
75
- "acc,exam_id__2014-15": 0.5384615384615384,
76
- "acc,exam_id__2018-25": 0.4125,
77
- "acc,exam_id__2017-22": 0.425,
78
- "acc,exam_id__2013-11": 0.425,
79
- "acc,exam_id__2014-14": 0.3625,
80
- "acc,exam_id__2013-10": 0.3375,
81
- "acc,exam_id__2010-01": 0.32941176470588235,
82
- "acc,exam_id__2013-12": 0.475,
83
- "acc,exam_id__2015-16": 0.3875,
84
- "acc,exam_id__2012-06a": 0.3875,
85
- "acc,exam_id__2016-20": 0.3875,
86
- "acc,exam_id__2012-08": 0.375,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.6658626171810755,
92
- "acc,all": 0.6886016451233843
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.46064331884597925,
96
- "acc,all": 0.6681592039800995,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
 
 
 
 
 
 
 
 
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
 
 
 
 
 
 
208
  },
209
- {
210
- "function": "take_first"
 
 
 
 
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7fd79f4a5120>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
 
 
 
 
 
 
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7fd79f4a4ae0>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
 
 
 
 
 
 
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
 
 
414
  },
415
- {
416
- "function": "take_first"
 
 
 
 
 
 
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fd79f4a4d60>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7fd79f4a5300>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
 
 
503
  },
504
- {
505
- "function": "take_first"
 
 
 
 
 
 
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fd79f4a5580>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
 
 
 
 
 
 
624
  },
625
- {
626
- "function": "take_first"
 
 
 
 
 
 
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
 
 
 
 
 
 
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7fd79f4a44a0>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
 
 
 
 
 
 
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
 
 
 
839
  },
840
- {
841
- "function": "take_first"
 
 
 
 
 
 
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7fd79f4a4720>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
 
 
 
 
 
 
963
  },
964
- {
965
- "function": "take_first"
 
 
 
 
 
 
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia-temp/tweetsentbr",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "id_sampler",
990
- "sampler_config": {
991
- "id_list": [
992
- "862006098672459776",
993
- "861612241703063552",
994
- "861833257087848448",
995
- "861283345476571138",
996
- "861283000335695873",
997
- "862139461274152962",
998
- "862139468702265344",
999
- "862006107702734848",
1000
- "862004354458537984",
1001
- "861833322925883392",
1002
- "861603063190171648",
1003
- "862139462716989440",
1004
- "862005877355810818",
1005
- "861751885862244353",
1006
- "862045180261695489",
1007
- "862004252499226630",
1008
- "862023970828292097",
1009
- "862041752127107074",
1010
- "862034961863503872",
1011
- "861293756548608001",
1012
- "861993527575695360",
1013
- "862003099355021315",
1014
- "862002404086206467",
1015
- "861282989602463744",
1016
- "862139454399668229",
1017
- "862139463769743361",
1018
- "862054906689138688",
1019
- "862139446535360513",
1020
- "861997363744911361",
1021
- "862057988898648065",
1022
- "861329080083521536",
1023
- "861286289034838016",
1024
- "861833050526806017",
1025
- "861300658565255169",
1026
- "861989003821813760",
1027
- "861682750398631938",
1028
- "861283275716907008",
1029
- "861283402523267072",
1030
- "861873108147466240",
1031
- "862139462138171392",
1032
- "861284090271715333",
1033
- "862139446149427201",
1034
- "861629109331525633",
1035
- "861721698609098753",
1036
- "862139453124612096",
1037
- "861283339482914816",
1038
- "861282466291748867",
1039
- "862055346759749632",
1040
- "862003019860389891",
1041
- "862140698346344449",
1042
- "862084376280092672",
1043
- "862003058708017152",
1044
- "862000677345787904",
1045
- "862029129310502913",
1046
- "862005822376882178",
1047
- "861969836297134085",
1048
- "861302955361927168",
1049
- "862064949451005953",
1050
- "861282589541355520",
1051
- "862005476858486784",
1052
- "862004684411850757",
1053
- "862139471101349890",
1054
- "862139467146170368",
1055
- "862139475098558465",
1056
- "862140706550403072",
1057
- "861282777001537536",
1058
- "862003184147079169",
1059
- "861283410656059394",
1060
- "861283417857691649",
1061
- "861888778922856448",
1062
- "861655860812099585",
1063
- "861834248063504384",
1064
- "862005210935382017",
1065
- "861282716930760704",
1066
- "861287082433622022"
1067
- ],
1068
- "id_column": "id"
1069
- }
1070
- },
1071
- "num_fewshot": 25,
1072
- "metric_list": [
1073
- {
1074
- "metric": "f1_macro",
1075
- "aggregation": "f1_macro",
1076
- "higher_is_better": true
1077
  },
1078
- {
1079
- "metric": "acc",
1080
- "aggregation": "acc",
1081
- "higher_is_better": true
1082
- }
1083
- ],
1084
- "output_type": "generate_until",
1085
- "generation_kwargs": {
1086
- "max_gen_toks": 32,
1087
- "do_sample": false,
1088
- "temperature": 0.0,
1089
- "top_k": null,
1090
- "top_p": null,
1091
- "until": [
1092
- "\n\n"
1093
- ]
1094
- },
1095
- "repeats": 1,
1096
- "filter_list": [
1097
- {
1098
- "name": "all",
1099
- "filter": [
1100
- {
1101
- "function": "find_similar_label",
1102
- "labels": [
1103
- "Positivo",
1104
- "Neutro",
1105
- "Negativo"
1106
- ]
 
 
 
 
 
 
1107
  },
1108
- {
1109
- "function": "take_first"
 
 
 
 
 
 
1110
  }
1111
- ]
1112
  }
1113
- ],
1114
- "should_decontaminate": false,
1115
- "metadata": {
1116
- "version": 1.0
1117
- }
1118
- }
1119
- },
1120
- "versions": {
1121
- "assin2_rte": 1.1,
1122
- "assin2_sts": 1.1,
1123
- "bluex": 1.1,
1124
- "enem_challenge": 1.1,
1125
- "faquad_nli": 1.1,
1126
- "hatebr_offensive": 1.0,
1127
- "oab_exams": 1.5,
1128
- "portuguese_hate_speech": 1.0,
1129
- "tweetsentbr": 1.0
1130
- },
1131
- "n-shot": {
1132
- "assin2_rte": 15,
1133
- "assin2_sts": 15,
1134
- "bluex": 3,
1135
- "enem_challenge": 3,
1136
- "faquad_nli": 15,
1137
- "hatebr_offensive": 25,
1138
- "oab_exams": 3,
1139
- "portuguese_hate_speech": 25,
1140
- "tweetsentbr": 25
1141
- },
1142
- "model_meta": {
1143
- "truncated": 0,
1144
- "non_truncated": 14150,
1145
- "padded": 0,
1146
- "non_padded": 14150,
1147
- "fewshots_truncated": 0,
1148
- "has_chat_template": true,
1149
- "chat_type": "system_user_assistant",
1150
- "n_gpus": 1,
1151
- "accelerate_num_process": null,
1152
- "model_sha": "dc24cabd13eacd3ae3a5fe574bd645483a335a4a",
1153
- "model_dtype": "torch.bfloat16",
1154
- "model_memory_footprint": 15020343296,
1155
- "model_num_parameters": 7241732096,
1156
- "model_is_loaded_in_4bit": null,
1157
- "model_is_loaded_in_8bit": null,
1158
- "model_is_quantized": null,
1159
- "model_device": "cuda:0",
1160
- "batch_size": 16,
1161
- "max_length": 4096,
1162
- "max_ctx_length": 4064,
1163
- "max_gen_toks": 32
1164
- },
1165
- "task_model_meta": {
1166
- "assin2_rte": {
1167
- "sample_size": 2448,
1168
- "truncated": 0,
1169
- "non_truncated": 2448,
1170
- "padded": 0,
1171
- "non_padded": 2448,
1172
- "fewshots_truncated": 0,
1173
- "mean_seq_length": 1646.7455065359477,
1174
- "min_seq_length": 1623,
1175
- "max_seq_length": 1713,
1176
- "max_ctx_length": 4064,
1177
- "max_gen_toks": 32,
1178
- "mean_original_fewshots_size": 15.0,
1179
- "mean_effective_fewshot_size": 15.0
1180
- },
1181
- "assin2_sts": {
1182
- "sample_size": 2448,
1183
- "truncated": 0,
1184
- "non_truncated": 2448,
1185
- "padded": 0,
1186
- "non_padded": 2448,
1187
- "fewshots_truncated": 0,
1188
- "mean_seq_length": 1855.7455065359477,
1189
- "min_seq_length": 1832,
1190
- "max_seq_length": 1922,
1191
- "max_ctx_length": 4064,
1192
- "max_gen_toks": 32,
1193
- "mean_original_fewshots_size": 15.0,
1194
- "mean_effective_fewshot_size": 15.0
1195
- },
1196
- "bluex": {
1197
- "sample_size": 719,
1198
- "truncated": 0,
1199
- "non_truncated": 719,
1200
- "padded": 0,
1201
- "non_padded": 719,
1202
- "fewshots_truncated": 0,
1203
- "mean_seq_length": 1792.9262865090404,
1204
- "min_seq_length": 1416,
1205
- "max_seq_length": 2593,
1206
- "max_ctx_length": 4064,
1207
- "max_gen_toks": 32,
1208
- "mean_original_fewshots_size": 3.0,
1209
- "mean_effective_fewshot_size": 3.0
1210
  },
1211
- "enem_challenge": {
1212
- "sample_size": 1429,
1213
- "truncated": 0,
1214
- "non_truncated": 1429,
1215
- "padded": 0,
1216
- "non_padded": 1429,
1217
- "fewshots_truncated": 0,
1218
- "mean_seq_length": 1693.039188243527,
1219
- "min_seq_length": 1427,
1220
- "max_seq_length": 2691,
1221
- "max_ctx_length": 4064,
1222
- "max_gen_toks": 32,
1223
- "mean_original_fewshots_size": 3.0,
1224
- "mean_effective_fewshot_size": 3.0
1225
  },
1226
- "faquad_nli": {
1227
- "sample_size": 650,
1228
- "truncated": 0,
1229
- "non_truncated": 650,
1230
- "padded": 0,
1231
- "non_padded": 650,
1232
- "fewshots_truncated": 0,
1233
- "mean_seq_length": 1871.9876923076922,
1234
- "min_seq_length": 1816,
1235
- "max_seq_length": 1992,
1236
- "max_ctx_length": 4064,
1237
- "max_gen_toks": 32,
1238
- "mean_original_fewshots_size": 15.0,
1239
- "mean_effective_fewshot_size": 15.0
1240
  },
1241
- "hatebr_offensive": {
1242
- "sample_size": 1400,
1243
- "truncated": 0,
1244
- "non_truncated": 1400,
1245
- "padded": 0,
1246
- "non_padded": 1400,
1247
- "fewshots_truncated": 0,
1248
- "mean_seq_length": 1752.3878571428572,
1249
- "min_seq_length": 1729,
1250
- "max_seq_length": 2003,
1251
- "max_ctx_length": 4064,
1252
- "max_gen_toks": 32,
1253
- "mean_original_fewshots_size": 25.0,
1254
- "mean_effective_fewshot_size": 25.0
 
 
 
 
 
 
 
 
1255
  },
1256
- "oab_exams": {
1257
- "sample_size": 2195,
1258
- "truncated": 0,
1259
- "non_truncated": 2195,
1260
- "padded": 0,
1261
- "non_padded": 2195,
1262
- "fewshots_truncated": 0,
1263
- "mean_seq_length": 1438.764464692483,
1264
- "min_seq_length": 1172,
1265
- "max_seq_length": 1941,
1266
- "max_ctx_length": 4064,
1267
- "max_gen_toks": 32,
1268
- "mean_original_fewshots_size": 3.0,
1269
- "mean_effective_fewshot_size": 3.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1270
  },
1271
- "portuguese_hate_speech": {
1272
- "sample_size": 851,
1273
- "truncated": 0,
1274
- "non_truncated": 851,
1275
- "padded": 0,
1276
- "non_padded": 851,
1277
- "fewshots_truncated": 0,
1278
- "mean_seq_length": 2253.3360752056406,
1279
- "min_seq_length": 2218,
1280
- "max_seq_length": 2292,
1281
- "max_ctx_length": 4064,
1282
- "max_gen_toks": 32,
1283
- "mean_original_fewshots_size": 25.0,
1284
- "mean_effective_fewshot_size": 25.0
 
 
 
 
 
 
1285
  },
1286
- "tweetsentbr": {
1287
- "sample_size": 2010,
1288
- "truncated": 0,
1289
- "non_truncated": 2010,
1290
- "padded": 0,
1291
- "non_padded": 2010,
1292
- "fewshots_truncated": 0,
1293
- "mean_seq_length": 1999.2492537313433,
1294
- "min_seq_length": 1978,
1295
- "max_seq_length": 2094,
1296
- "max_ctx_length": 4064,
1297
- "max_gen_toks": 32,
1298
- "mean_original_fewshots_size": 25.0,
1299
- "mean_effective_fewshot_size": 25.0
1300
- }
1301
- },
1302
- "config": {
1303
- "model": "huggingface",
1304
- "model_args": "pretrained=HuggingFaceH4/zephyr-7b-beta,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096",
1305
- "batch_size": "auto",
1306
- "batch_sizes": [],
1307
- "device": null,
1308
- "use_cache": null,
1309
- "limit": [
1310
- null,
1311
- null,
1312
- null,
1313
- null,
1314
- null,
1315
- null,
1316
- null,
1317
- null,
1318
- null
1319
- ],
1320
- "bootstrap_iters": 0,
1321
- "gen_kwargs": null
1322
- },
1323
- "git_hash": "804df15"
1324
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.8836486323653452,
5
+ "acc,all": 0.8839869281045751,
6
+ "alias": "assin2_rte"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.6678266192299295,
10
+ "mse,all": 0.6669526143790849,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.47983310152990266,
15
+ "acc,exam_id__USP_2021": 0.36538461538461536,
16
+ "acc,exam_id__UNICAMP_2021_2": 0.37254901960784315,
17
+ "acc,exam_id__UNICAMP_2023": 0.4418604651162791,
18
+ "acc,exam_id__UNICAMP_2021_1": 0.45652173913043476,
19
+ "acc,exam_id__USP_2024": 0.7073170731707317,
20
+ "acc,exam_id__UNICAMP_2018": 0.42592592592592593,
21
+ "acc,exam_id__USP_2022": 0.5510204081632653,
22
+ "acc,exam_id__UNICAMP_2020": 0.4909090909090909,
23
+ "acc,exam_id__USP_2018": 0.5,
24
+ "acc,exam_id__USP_2020": 0.42857142857142855,
25
+ "acc,exam_id__UNICAMP_2022": 0.46153846153846156,
26
+ "acc,exam_id__USP_2019": 0.425,
27
+ "acc,exam_id__USP_2023": 0.5909090909090909,
28
+ "acc,exam_id__UNICAMP_2019": 0.56,
29
+ "acc,exam_id__UNICAMP_2024": 0.4666666666666667,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.5787263820853744,
35
+ "acc,exam_id__2016_2": 0.5284552845528455,
36
+ "acc,exam_id__2009": 0.591304347826087,
37
+ "acc,exam_id__2011": 0.6666666666666666,
38
+ "acc,exam_id__2012": 0.6206896551724138,
39
+ "acc,exam_id__2013": 0.5925925925925926,
40
+ "acc,exam_id__2016": 0.5537190082644629,
41
+ "acc,exam_id__2022": 0.5037593984962406,
42
+ "acc,exam_id__2023": 0.5777777777777777,
43
+ "acc,exam_id__2010": 0.5555555555555556,
44
+ "acc,exam_id__2014": 0.5963302752293578,
45
+ "acc,exam_id__2015": 0.6218487394957983,
46
+ "acc,exam_id__2017": 0.5517241379310345
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.7017672651113582,
50
+ "acc,all": 0.7384615384615385,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.8176778106453834,
56
+ "acc,all": 0.82
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.3931662870159453,
60
+ "acc,exam_id__2010-02": 0.46,
61
+ "acc,exam_id__2016-19": 0.5,
62
+ "acc,exam_id__2015-17": 0.4358974358974359,
63
+ "acc,exam_id__2016-21": 0.4,
64
+ "acc,exam_id__2017-24": 0.325,
65
+ "acc,exam_id__2012-09": 0.37662337662337664,
66
+ "acc,exam_id__2011-04": 0.3125,
67
+ "acc,exam_id__2017-23": 0.4375,
68
+ "acc,exam_id__2011-03": 0.41414141414141414,
69
+ "acc,exam_id__2012-07": 0.3375,
70
+ "acc,exam_id__2012-06": 0.375,
71
+ "acc,exam_id__2014-13": 0.35,
72
+ "acc,exam_id__2016-20a": 0.225,
73
+ "acc,exam_id__2011-05": 0.3875,
74
+ "acc,exam_id__2015-18": 0.425,
75
+ "acc,exam_id__2014-15": 0.5384615384615384,
76
+ "acc,exam_id__2018-25": 0.4125,
77
+ "acc,exam_id__2017-22": 0.425,
78
+ "acc,exam_id__2013-11": 0.425,
79
+ "acc,exam_id__2014-14": 0.3625,
80
+ "acc,exam_id__2013-10": 0.3375,
81
+ "acc,exam_id__2010-01": 0.32941176470588235,
82
+ "acc,exam_id__2013-12": 0.475,
83
+ "acc,exam_id__2015-16": 0.3875,
84
+ "acc,exam_id__2012-06a": 0.3875,
85
+ "acc,exam_id__2016-20": 0.3875,
86
+ "acc,exam_id__2012-08": 0.375,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.6658626171810755,
92
+ "acc,all": 0.6886016451233843
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.614191091794639,
96
+ "acc,all": 0.6681592039800995,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7fd79f4a5120>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7fd79f4a4ae0>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fd79f4a4d60>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7fd79f4a5300>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fd79f4a5580>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7fd79f4a44a0>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7fd79f4a4720>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia-temp/tweetsentbr",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "id_sampler",
990
+ "sampler_config": {
991
+ "id_list": [
992
+ "862006098672459776",
993
+ "861612241703063552",
994
+ "861833257087848448",
995
+ "861283345476571138",
996
+ "861283000335695873",
997
+ "862139461274152962",
998
+ "862139468702265344",
999
+ "862006107702734848",
1000
+ "862004354458537984",
1001
+ "861833322925883392",
1002
+ "861603063190171648",
1003
+ "862139462716989440",
1004
+ "862005877355810818",
1005
+ "861751885862244353",
1006
+ "862045180261695489",
1007
+ "862004252499226630",
1008
+ "862023970828292097",
1009
+ "862041752127107074",
1010
+ "862034961863503872",
1011
+ "861293756548608001",
1012
+ "861993527575695360",
1013
+ "862003099355021315",
1014
+ "862002404086206467",
1015
+ "861282989602463744",
1016
+ "862139454399668229",
1017
+ "862139463769743361",
1018
+ "862054906689138688",
1019
+ "862139446535360513",
1020
+ "861997363744911361",
1021
+ "862057988898648065",
1022
+ "861329080083521536",
1023
+ "861286289034838016",
1024
+ "861833050526806017",
1025
+ "861300658565255169",
1026
+ "861989003821813760",
1027
+ "861682750398631938",
1028
+ "861283275716907008",
1029
+ "861283402523267072",
1030
+ "861873108147466240",
1031
+ "862139462138171392",
1032
+ "861284090271715333",
1033
+ "862139446149427201",
1034
+ "861629109331525633",
1035
+ "861721698609098753",
1036
+ "862139453124612096",
1037
+ "861283339482914816",
1038
+ "861282466291748867",
1039
+ "862055346759749632",
1040
+ "862003019860389891",
1041
+ "862140698346344449",
1042
+ "862084376280092672",
1043
+ "862003058708017152",
1044
+ "862000677345787904",
1045
+ "862029129310502913",
1046
+ "862005822376882178",
1047
+ "861969836297134085",
1048
+ "861302955361927168",
1049
+ "862064949451005953",
1050
+ "861282589541355520",
1051
+ "862005476858486784",
1052
+ "862004684411850757",
1053
+ "862139471101349890",
1054
+ "862139467146170368",
1055
+ "862139475098558465",
1056
+ "862140706550403072",
1057
+ "861282777001537536",
1058
+ "862003184147079169",
1059
+ "861283410656059394",
1060
+ "861283417857691649",
1061
+ "861888778922856448",
1062
+ "861655860812099585",
1063
+ "861834248063504384",
1064
+ "862005210935382017",
1065
+ "861282716930760704",
1066
+ "861287082433622022"
1067
+ ],
1068
+ "id_column": "id"
1069
+ }
1070
+ },
1071
+ "num_fewshot": 25,
1072
+ "metric_list": [
1073
+ {
1074
+ "metric": "f1_macro",
1075
+ "aggregation": "f1_macro",
1076
+ "higher_is_better": true
1077
+ },
1078
+ {
1079
+ "metric": "acc",
1080
+ "aggregation": "acc",
1081
+ "higher_is_better": true
1082
+ }
1083
+ ],
1084
+ "output_type": "generate_until",
1085
+ "generation_kwargs": {
1086
+ "max_gen_toks": 32,
1087
+ "do_sample": false,
1088
+ "temperature": 0.0,
1089
+ "top_k": null,
1090
+ "top_p": null,
1091
+ "until": [
1092
+ "\n\n"
1093
+ ]
1094
  },
1095
+ "repeats": 1,
1096
+ "filter_list": [
1097
+ {
1098
+ "name": "all",
1099
+ "filter": [
1100
+ {
1101
+ "function": "find_similar_label",
1102
+ "labels": [
1103
+ "Positivo",
1104
+ "Neutro",
1105
+ "Negativo"
1106
+ ]
1107
+ },
1108
+ {
1109
+ "function": "take_first"
1110
+ }
1111
+ ]
1112
+ }
1113
+ ],
1114
+ "should_decontaminate": false,
1115
+ "metadata": {
1116
+ "version": 1.0
1117
  }
 
1118
  }
1119
  },
1120
+ "versions": {
1121
+ "assin2_rte": 1.1,
1122
+ "assin2_sts": 1.1,
1123
+ "bluex": 1.1,
1124
+ "enem_challenge": 1.1,
1125
+ "faquad_nli": 1.1,
1126
+ "hatebr_offensive": 1.0,
1127
+ "oab_exams": 1.5,
1128
+ "portuguese_hate_speech": 1.0,
1129
+ "tweetsentbr": 1.0
 
 
 
 
1130
  },
1131
+ "n-shot": {
1132
+ "assin2_rte": 15,
1133
+ "assin2_sts": 15,
1134
+ "bluex": 3,
1135
+ "enem_challenge": 3,
1136
+ "faquad_nli": 15,
1137
+ "hatebr_offensive": 25,
1138
+ "oab_exams": 3,
1139
+ "portuguese_hate_speech": 25,
1140
+ "tweetsentbr": 25
 
 
 
 
1141
  },
1142
+ "model_meta": {
1143
+ "truncated": 0,
1144
+ "non_truncated": 14150,
1145
+ "padded": 0,
1146
+ "non_padded": 14150,
1147
+ "fewshots_truncated": 0,
1148
+ "has_chat_template": true,
1149
+ "chat_type": "system_user_assistant",
1150
+ "n_gpus": 1,
1151
+ "accelerate_num_process": null,
1152
+ "model_sha": "dc24cabd13eacd3ae3a5fe574bd645483a335a4a",
1153
+ "model_dtype": "torch.bfloat16",
1154
+ "model_memory_footprint": 15020343296,
1155
+ "model_num_parameters": 7241732096,
1156
+ "model_is_loaded_in_4bit": null,
1157
+ "model_is_loaded_in_8bit": null,
1158
+ "model_is_quantized": null,
1159
+ "model_device": "cuda:0",
1160
+ "batch_size": 16,
1161
+ "max_length": 4096,
1162
+ "max_ctx_length": 4064,
1163
+ "max_gen_toks": 32
1164
  },
1165
+ "task_model_meta": {
1166
+ "assin2_rte": {
1167
+ "sample_size": 2448,
1168
+ "truncated": 0,
1169
+ "non_truncated": 2448,
1170
+ "padded": 0,
1171
+ "non_padded": 2448,
1172
+ "fewshots_truncated": 0,
1173
+ "mean_seq_length": 1646.7455065359477,
1174
+ "min_seq_length": 1623,
1175
+ "max_seq_length": 1713,
1176
+ "max_ctx_length": 4064,
1177
+ "max_gen_toks": 32,
1178
+ "mean_original_fewshots_size": 15.0,
1179
+ "mean_effective_fewshot_size": 15.0
1180
+ },
1181
+ "assin2_sts": {
1182
+ "sample_size": 2448,
1183
+ "truncated": 0,
1184
+ "non_truncated": 2448,
1185
+ "padded": 0,
1186
+ "non_padded": 2448,
1187
+ "fewshots_truncated": 0,
1188
+ "mean_seq_length": 1855.7455065359477,
1189
+ "min_seq_length": 1832,
1190
+ "max_seq_length": 1922,
1191
+ "max_ctx_length": 4064,
1192
+ "max_gen_toks": 32,
1193
+ "mean_original_fewshots_size": 15.0,
1194
+ "mean_effective_fewshot_size": 15.0
1195
+ },
1196
+ "bluex": {
1197
+ "sample_size": 719,
1198
+ "truncated": 0,
1199
+ "non_truncated": 719,
1200
+ "padded": 0,
1201
+ "non_padded": 719,
1202
+ "fewshots_truncated": 0,
1203
+ "mean_seq_length": 1792.9262865090404,
1204
+ "min_seq_length": 1416,
1205
+ "max_seq_length": 2593,
1206
+ "max_ctx_length": 4064,
1207
+ "max_gen_toks": 32,
1208
+ "mean_original_fewshots_size": 3.0,
1209
+ "mean_effective_fewshot_size": 3.0
1210
+ },
1211
+ "enem_challenge": {
1212
+ "sample_size": 1429,
1213
+ "truncated": 0,
1214
+ "non_truncated": 1429,
1215
+ "padded": 0,
1216
+ "non_padded": 1429,
1217
+ "fewshots_truncated": 0,
1218
+ "mean_seq_length": 1693.039188243527,
1219
+ "min_seq_length": 1427,
1220
+ "max_seq_length": 2691,
1221
+ "max_ctx_length": 4064,
1222
+ "max_gen_toks": 32,
1223
+ "mean_original_fewshots_size": 3.0,
1224
+ "mean_effective_fewshot_size": 3.0
1225
+ },
1226
+ "faquad_nli": {
1227
+ "sample_size": 650,
1228
+ "truncated": 0,
1229
+ "non_truncated": 650,
1230
+ "padded": 0,
1231
+ "non_padded": 650,
1232
+ "fewshots_truncated": 0,
1233
+ "mean_seq_length": 1871.9876923076922,
1234
+ "min_seq_length": 1816,
1235
+ "max_seq_length": 1992,
1236
+ "max_ctx_length": 4064,
1237
+ "max_gen_toks": 32,
1238
+ "mean_original_fewshots_size": 15.0,
1239
+ "mean_effective_fewshot_size": 15.0
1240
+ },
1241
+ "hatebr_offensive": {
1242
+ "sample_size": 1400,
1243
+ "truncated": 0,
1244
+ "non_truncated": 1400,
1245
+ "padded": 0,
1246
+ "non_padded": 1400,
1247
+ "fewshots_truncated": 0,
1248
+ "mean_seq_length": 1752.3878571428572,
1249
+ "min_seq_length": 1729,
1250
+ "max_seq_length": 2003,
1251
+ "max_ctx_length": 4064,
1252
+ "max_gen_toks": 32,
1253
+ "mean_original_fewshots_size": 25.0,
1254
+ "mean_effective_fewshot_size": 25.0
1255
+ },
1256
+ "oab_exams": {
1257
+ "sample_size": 2195,
1258
+ "truncated": 0,
1259
+ "non_truncated": 2195,
1260
+ "padded": 0,
1261
+ "non_padded": 2195,
1262
+ "fewshots_truncated": 0,
1263
+ "mean_seq_length": 1438.764464692483,
1264
+ "min_seq_length": 1172,
1265
+ "max_seq_length": 1941,
1266
+ "max_ctx_length": 4064,
1267
+ "max_gen_toks": 32,
1268
+ "mean_original_fewshots_size": 3.0,
1269
+ "mean_effective_fewshot_size": 3.0
1270
+ },
1271
+ "portuguese_hate_speech": {
1272
+ "sample_size": 851,
1273
+ "truncated": 0,
1274
+ "non_truncated": 851,
1275
+ "padded": 0,
1276
+ "non_padded": 851,
1277
+ "fewshots_truncated": 0,
1278
+ "mean_seq_length": 2253.3360752056406,
1279
+ "min_seq_length": 2218,
1280
+ "max_seq_length": 2292,
1281
+ "max_ctx_length": 4064,
1282
+ "max_gen_toks": 32,
1283
+ "mean_original_fewshots_size": 25.0,
1284
+ "mean_effective_fewshot_size": 25.0
1285
+ },
1286
+ "tweetsentbr": {
1287
+ "sample_size": 2010,
1288
+ "truncated": 0,
1289
+ "non_truncated": 2010,
1290
+ "padded": 0,
1291
+ "non_padded": 2010,
1292
+ "fewshots_truncated": 0,
1293
+ "mean_seq_length": 1999.2492537313433,
1294
+ "min_seq_length": 1978,
1295
+ "max_seq_length": 2094,
1296
+ "max_ctx_length": 4064,
1297
+ "max_gen_toks": 32,
1298
+ "mean_original_fewshots_size": 25.0,
1299
+ "mean_effective_fewshot_size": 25.0
1300
+ }
1301
  },
1302
+ "config": {
1303
+ "model": "huggingface",
1304
+ "model_args": "pretrained=HuggingFaceH4/zephyr-7b-beta,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096",
1305
+ "batch_size": "auto",
1306
+ "batch_sizes": [],
1307
+ "device": null,
1308
+ "use_cache": null,
1309
+ "limit": [
1310
+ null,
1311
+ null,
1312
+ null,
1313
+ null,
1314
+ null,
1315
+ null,
1316
+ null,
1317
+ null,
1318
+ null
1319
+ ],
1320
+ "bootstrap_iters": 0,
1321
+ "gen_kwargs": null
1322
  },
1323
+ "git_hash": "804df15"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1324
  }
HuggingFaceH4/zephyr-7b-beta/results_2024-02-21T23-57-52.146406.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.6276835593344771,
38
- "all_grouped_npm": 0.45237979734164724,
39
  "all_grouped": {
40
  "enem_challenge": 0.5787263820853744,
41
  "bluex": 0.47983310152990266,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.7017672651113582,
46
  "hatebr_offensive": 0.8176778106453834,
47
  "portuguese_hate_speech": 0.6658626171810755,
48
- "tweetsentbr": 0.46064331884597925
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.5787263820853744,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7017672651113582,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8176778106453834,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6658626171810755,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.46064331884597925
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.5787263820853744,
@@ -150,9 +150,9 @@
150
  "main_score": 0.6658626171810755
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.46064331884597925,
154
  "acc,all": 0.6681592039800995,
155
- "main_score": 0.46064331884597925
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.6447444229954393,
38
+ "all_grouped_npm": 0.47776798731331716,
39
  "all_grouped": {
40
  "enem_challenge": 0.5787263820853744,
41
  "bluex": 0.47983310152990266,
 
45
  "faquad_nli": 0.7017672651113582,
46
  "hatebr_offensive": 0.8176778106453834,
47
  "portuguese_hate_speech": 0.6658626171810755,
48
+ "tweetsentbr": 0.614191091794639
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.5787263820853744,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7017672651113582,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8176778106453834,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6658626171810755,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.614191091794639
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.5787263820853744,
 
150
  "main_score": 0.6658626171810755
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.614191091794639,
154
  "acc,all": 0.6681592039800995,
155
+ "main_score": 0.614191091794639
156
  }
157
  },
158
  "config_tasks": {
HuggingFaceTB/SmolLM-1.7B-Instruct/raw_2024-07-29T01-29-23.830440/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.39689976031382207,
5
- "acc,all": 0.5755718954248366,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.011089425612064035,
10
- "mse,all": 2.9864903741798092,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.18497913769123783,
15
- "acc,exam_id__USP_2018": 0.1111111111111111,
16
- "acc,exam_id__USP_2024": 0.1951219512195122,
17
- "acc,exam_id__UNICAMP_2019": 0.14,
18
- "acc,exam_id__UNICAMP_2023": 0.32558139534883723,
19
- "acc,exam_id__USP_2021": 0.11538461538461539,
20
- "acc,exam_id__UNICAMP_2021_1": 0.21739130434782608,
21
- "acc,exam_id__UNICAMP_2021_2": 0.13725490196078433,
22
- "acc,exam_id__USP_2020": 0.19642857142857142,
23
- "acc,exam_id__USP_2022": 0.12244897959183673,
24
- "acc,exam_id__UNICAMP_2020": 0.23636363636363636,
25
- "acc,exam_id__UNICAMP_2024": 0.3111111111111111,
26
- "acc,exam_id__USP_2023": 0.1590909090909091,
27
- "acc,exam_id__UNICAMP_2018": 0.2222222222222222,
28
- "acc,exam_id__USP_2019": 0.15,
29
- "acc,exam_id__UNICAMP_2022": 0.15384615384615385,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.15185444366689993,
35
- "acc,exam_id__2017": 0.15517241379310345,
36
- "acc,exam_id__2009": 0.09565217391304348,
37
- "acc,exam_id__2013": 0.19444444444444445,
38
- "acc,exam_id__2023": 0.2,
39
- "acc,exam_id__2011": 0.13675213675213677,
40
- "acc,exam_id__2010": 0.17094017094017094,
41
- "acc,exam_id__2016_2": 0.13008130081300814,
42
- "acc,exam_id__2014": 0.14678899082568808,
43
- "acc,exam_id__2022": 0.15789473684210525,
44
- "acc,exam_id__2015": 0.11764705882352941,
45
- "acc,exam_id__2012": 0.16379310344827586,
46
- "acc,exam_id__2016": 0.1487603305785124
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.17821782178217824,
50
- "acc,all": 0.3323076923076923,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.24931756852830744,
56
- "acc,all": 0.3842857142857143
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.2214123006833713,
60
- "acc,exam_id__2011-05": 0.2125,
61
- "acc,exam_id__2014-15": 0.21794871794871795,
62
- "acc,exam_id__2016-21": 0.2125,
63
- "acc,exam_id__2014-13": 0.1875,
64
- "acc,exam_id__2015-18": 0.2,
65
- "acc,exam_id__2016-20": 0.2375,
66
- "acc,exam_id__2017-22": 0.1875,
67
- "acc,exam_id__2015-17": 0.21794871794871795,
68
- "acc,exam_id__2017-24": 0.2375,
69
- "acc,exam_id__2011-03": 0.20202020202020202,
70
- "acc,exam_id__2013-12": 0.175,
71
- "acc,exam_id__2012-08": 0.1625,
72
- "acc,exam_id__2016-19": 0.2564102564102564,
73
- "acc,exam_id__2018-25": 0.25,
74
- "acc,exam_id__2014-14": 0.2,
75
- "acc,exam_id__2016-20a": 0.2875,
76
- "acc,exam_id__2012-07": 0.25,
77
- "acc,exam_id__2017-23": 0.2625,
78
- "acc,exam_id__2013-10": 0.225,
79
- "acc,exam_id__2010-01": 0.16470588235294117,
80
- "acc,exam_id__2013-11": 0.225,
81
- "acc,exam_id__2015-16": 0.2125,
82
- "acc,exam_id__2011-04": 0.275,
83
- "acc,exam_id__2012-09": 0.24675324675324675,
84
- "acc,exam_id__2012-06a": 0.2625,
85
- "acc,exam_id__2012-06": 0.2375,
86
- "acc,exam_id__2010-02": 0.19,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.07734989648033126,
92
- "acc,all": 0.0846063454759107
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.11774990858411193,
96
- "acc,all": 0.2880597014925373,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
208
  },
209
- {
210
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7f0d5c0de8e0>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7f0d5c0de2a0>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
 
 
414
  },
415
- {
416
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f0d5c0de520>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7f0d5c0deac0>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
 
 
503
  },
504
- {
505
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f0d5c0ded40>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
624
  },
625
- {
626
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7f0d5c0ddc60>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
839
  },
840
- {
841
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7f0d5c0ddee0>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
963
  },
964
- {
965
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
  },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
- },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
  },
1028
- {
1029
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1030
  }
1031
- ]
1032
  }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
- }
1038
- }
1039
- },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
1050
- },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
1061
- },
1062
- "model_meta": {
1063
- "truncated": 1220,
1064
- "non_truncated": 12930,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 2128,
1068
- "has_chat_template": true,
1069
- "chat_type": "system_user_assistant",
1070
- "n_gpus": 1,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "df32c35db9f85f9c44997dcf694d1a79e944ff69",
1073
- "model_dtype": "torch.bfloat16",
1074
- "model_memory_footprint": 3422755968,
1075
- "model_num_parameters": 1711376384,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 32,
1081
- "max_length": 2048,
1082
- "max_ctx_length": 2016,
1083
- "max_gen_toks": 32
1084
- },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1618.1425653594772,
1094
- "min_seq_length": 1593,
1095
- "max_seq_length": 1692,
1096
- "max_ctx_length": 2016,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1844.1425653594772,
1109
- "min_seq_length": 1819,
1110
- "max_seq_length": 1918,
1111
- "max_ctx_length": 2016,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
  },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 205,
1119
- "non_truncated": 514,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 267,
1123
- "mean_seq_length": 1882.730180806676,
1124
- "min_seq_length": 1479,
1125
- "max_seq_length": 2749,
1126
- "max_ctx_length": 2016,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 2.6286509040333796
1130
  },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 162,
1134
- "non_truncated": 1267,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 170,
1138
- "mean_seq_length": 1803.6703988803358,
1139
- "min_seq_length": 1508,
1140
- "max_seq_length": 2796,
1141
- "max_ctx_length": 2016,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 2.881035689293212
1145
  },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1875.5692307692307,
1154
- "min_seq_length": 1815,
1155
- "max_seq_length": 2009,
1156
- "max_ctx_length": 2016,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
 
 
 
 
 
 
 
 
1160
  },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1568.8021428571428,
1169
- "min_seq_length": 1543,
1170
- "max_seq_length": 1844,
1171
- "max_ctx_length": 2016,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 2,
1179
- "non_truncated": 2193,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 2,
1183
- "mean_seq_length": 1567.8610478359908,
1184
- "min_seq_length": 1267,
1185
- "max_seq_length": 2116,
1186
- "max_ctx_length": 2016,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 2.9990888382687926
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1190
  },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 851,
1194
- "non_truncated": 0,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 1689,
1198
- "mean_seq_length": 2138.0317273795536,
1199
- "min_seq_length": 2101,
1200
- "max_seq_length": 2172,
1201
- "max_ctx_length": 2016,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 23.015276145710928
 
 
 
 
 
 
1205
  },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1809.4268656716417,
1214
- "min_seq_length": 1788,
1215
- "max_seq_length": 1870,
1216
- "max_ctx_length": 2016,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
- },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=HuggingFaceTB/SmolLM-1.7B-Instruct,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
- },
1243
- "git_hash": "5a13f3e"
1244
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.5953496404707331,
5
+ "acc,all": 0.5755718954248366,
6
+ "alias": "assin2_rte"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.011089425612064035,
10
+ "mse,all": 2.9864903741798092,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.18497913769123783,
15
+ "acc,exam_id__USP_2018": 0.1111111111111111,
16
+ "acc,exam_id__USP_2024": 0.1951219512195122,
17
+ "acc,exam_id__UNICAMP_2019": 0.14,
18
+ "acc,exam_id__UNICAMP_2023": 0.32558139534883723,
19
+ "acc,exam_id__USP_2021": 0.11538461538461539,
20
+ "acc,exam_id__UNICAMP_2021_1": 0.21739130434782608,
21
+ "acc,exam_id__UNICAMP_2021_2": 0.13725490196078433,
22
+ "acc,exam_id__USP_2020": 0.19642857142857142,
23
+ "acc,exam_id__USP_2022": 0.12244897959183673,
24
+ "acc,exam_id__UNICAMP_2020": 0.23636363636363636,
25
+ "acc,exam_id__UNICAMP_2024": 0.3111111111111111,
26
+ "acc,exam_id__USP_2023": 0.1590909090909091,
27
+ "acc,exam_id__UNICAMP_2018": 0.2222222222222222,
28
+ "acc,exam_id__USP_2019": 0.15,
29
+ "acc,exam_id__UNICAMP_2022": 0.15384615384615385,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.15185444366689993,
35
+ "acc,exam_id__2017": 0.15517241379310345,
36
+ "acc,exam_id__2009": 0.09565217391304348,
37
+ "acc,exam_id__2013": 0.19444444444444445,
38
+ "acc,exam_id__2023": 0.2,
39
+ "acc,exam_id__2011": 0.13675213675213677,
40
+ "acc,exam_id__2010": 0.17094017094017094,
41
+ "acc,exam_id__2016_2": 0.13008130081300814,
42
+ "acc,exam_id__2014": 0.14678899082568808,
43
+ "acc,exam_id__2022": 0.15789473684210525,
44
+ "acc,exam_id__2015": 0.11764705882352941,
45
+ "acc,exam_id__2012": 0.16379310344827586,
46
+ "acc,exam_id__2016": 0.1487603305785124
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.26732673267326734,
50
+ "acc,all": 0.3323076923076923,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.3739763527924611,
56
+ "acc,all": 0.3842857142857143
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.2214123006833713,
60
+ "acc,exam_id__2011-05": 0.2125,
61
+ "acc,exam_id__2014-15": 0.21794871794871795,
62
+ "acc,exam_id__2016-21": 0.2125,
63
+ "acc,exam_id__2014-13": 0.1875,
64
+ "acc,exam_id__2015-18": 0.2,
65
+ "acc,exam_id__2016-20": 0.2375,
66
+ "acc,exam_id__2017-22": 0.1875,
67
+ "acc,exam_id__2015-17": 0.21794871794871795,
68
+ "acc,exam_id__2017-24": 0.2375,
69
+ "acc,exam_id__2011-03": 0.20202020202020202,
70
+ "acc,exam_id__2013-12": 0.175,
71
+ "acc,exam_id__2012-08": 0.1625,
72
+ "acc,exam_id__2016-19": 0.2564102564102564,
73
+ "acc,exam_id__2018-25": 0.25,
74
+ "acc,exam_id__2014-14": 0.2,
75
+ "acc,exam_id__2016-20a": 0.2875,
76
+ "acc,exam_id__2012-07": 0.25,
77
+ "acc,exam_id__2017-23": 0.2625,
78
+ "acc,exam_id__2013-10": 0.225,
79
+ "acc,exam_id__2010-01": 0.16470588235294117,
80
+ "acc,exam_id__2013-11": 0.225,
81
+ "acc,exam_id__2015-16": 0.2125,
82
+ "acc,exam_id__2011-04": 0.275,
83
+ "acc,exam_id__2012-09": 0.24675324675324675,
84
+ "acc,exam_id__2012-06a": 0.2625,
85
+ "acc,exam_id__2012-06": 0.2375,
86
+ "acc,exam_id__2010-02": 0.19,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.1160248447204969,
92
+ "acc,all": 0.0846063454759107
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.15699987811214924,
96
+ "acc,all": 0.2880597014925373,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7f0d5c0de8e0>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7f0d5c0de2a0>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f0d5c0de520>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7f0d5c0deac0>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f0d5c0ded40>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7f0d5c0ddc60>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7f0d5c0ddee0>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
990
  },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
+ },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
+ },
1028
+ {
1029
+ "function": "take_first"
1030
+ }
1031
+ ]
1032
+ }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
  }
 
1038
  }
1039
  },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
 
 
 
 
1050
  },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
 
 
 
 
1061
  },
1062
+ "model_meta": {
1063
+ "truncated": 1220,
1064
+ "non_truncated": 12930,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 2128,
1068
+ "has_chat_template": true,
1069
+ "chat_type": "system_user_assistant",
1070
+ "n_gpus": 1,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "df32c35db9f85f9c44997dcf694d1a79e944ff69",
1073
+ "model_dtype": "torch.bfloat16",
1074
+ "model_memory_footprint": 3422755968,
1075
+ "model_num_parameters": 1711376384,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 32,
1081
+ "max_length": 2048,
1082
+ "max_ctx_length": 2016,
1083
+ "max_gen_toks": 32
1084
  },
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1618.1425653594772,
1094
+ "min_seq_length": 1593,
1095
+ "max_seq_length": 1692,
1096
+ "max_ctx_length": 2016,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
+ },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1844.1425653594772,
1109
+ "min_seq_length": 1819,
1110
+ "max_seq_length": 1918,
1111
+ "max_ctx_length": 2016,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
+ },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 205,
1119
+ "non_truncated": 514,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 267,
1123
+ "mean_seq_length": 1882.730180806676,
1124
+ "min_seq_length": 1479,
1125
+ "max_seq_length": 2749,
1126
+ "max_ctx_length": 2016,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 2.6286509040333796
1130
+ },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 162,
1134
+ "non_truncated": 1267,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 170,
1138
+ "mean_seq_length": 1803.6703988803358,
1139
+ "min_seq_length": 1508,
1140
+ "max_seq_length": 2796,
1141
+ "max_ctx_length": 2016,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 2.881035689293212
1145
+ },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1875.5692307692307,
1154
+ "min_seq_length": 1815,
1155
+ "max_seq_length": 2009,
1156
+ "max_ctx_length": 2016,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1568.8021428571428,
1169
+ "min_seq_length": 1543,
1170
+ "max_seq_length": 1844,
1171
+ "max_ctx_length": 2016,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 2,
1179
+ "non_truncated": 2193,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 2,
1183
+ "mean_seq_length": 1567.8610478359908,
1184
+ "min_seq_length": 1267,
1185
+ "max_seq_length": 2116,
1186
+ "max_ctx_length": 2016,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 2.9990888382687926
1190
+ },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 851,
1194
+ "non_truncated": 0,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 1689,
1198
+ "mean_seq_length": 2138.0317273795536,
1199
+ "min_seq_length": 2101,
1200
+ "max_seq_length": 2172,
1201
+ "max_ctx_length": 2016,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 23.015276145710928
1205
+ },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1809.4268656716417,
1214
+ "min_seq_length": 1788,
1215
+ "max_seq_length": 1870,
1216
+ "max_ctx_length": 2016,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
  },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=HuggingFaceTB/SmolLM-1.7B-Instruct,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
  },
1243
+ "git_hash": "5a13f3e"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1244
  }
HuggingFaceTB/SmolLM-1.7B-Instruct/results_2024-07-29T01-29-23.830440.json CHANGED
@@ -34,29 +34,29 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.17654114037136934,
38
- "all_grouped_npm": -0.2712041427416204,
39
  "all_grouped": {
40
  "enem_challenge": 0.15185444366689993,
41
  "bluex": 0.18497913769123783,
42
  "oab_exams": 0.2214123006833713,
43
- "assin2_rte": 0.39689976031382207,
44
  "assin2_sts": 0.011089425612064035,
45
- "faquad_nli": 0.17821782178217824,
46
- "hatebr_offensive": 0.24931756852830744,
47
- "portuguese_hate_speech": 0.07734989648033126,
48
- "tweetsentbr": 0.11774990858411193
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.15185444366689993,
52
  "harness|bluex|bluex|None|3": 0.18497913769123783,
53
  "harness|oab_exams|oab_exams|None|3": 0.2214123006833713,
54
- "harness|assin2_rte|assin2_rte|None|15": 0.39689976031382207,
55
  "harness|assin2_sts|assin2_sts|None|15": 0.011089425612064035,
56
- "harness|faquad_nli|faquad_nli|None|15": 0.17821782178217824,
57
- "harness|hatebr_offensive|hatebr_offensive|None|25": 0.24931756852830744,
58
- "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.07734989648033126,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.11774990858411193
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.15185444366689993,
@@ -125,9 +125,9 @@
125
  "main_score": 0.2214123006833713
126
  },
127
  "harness|assin2_rte|assin2_rte|None|15": {
128
- "f1_macro,all": 0.39689976031382207,
129
  "acc,all": 0.5755718954248366,
130
- "main_score": 0.39689976031382207
131
  },
132
  "harness|assin2_sts|assin2_sts|None|15": {
133
  "pearson,all": 0.011089425612064035,
@@ -135,24 +135,24 @@
135
  "main_score": 0.011089425612064035
136
  },
137
  "harness|faquad_nli|faquad_nli|None|15": {
138
- "f1_macro,all": 0.17821782178217824,
139
  "acc,all": 0.3323076923076923,
140
- "main_score": 0.17821782178217824
141
  },
142
  "harness|hatebr_offensive|hatebr_offensive|None|25": {
143
- "f1_macro,all": 0.24931756852830744,
144
  "acc,all": 0.3842857142857143,
145
- "main_score": 0.24931756852830744
146
  },
147
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": {
148
- "f1_macro,all": 0.07734989648033126,
149
  "acc,all": 0.0846063454759107,
150
- "main_score": 0.07734989648033126
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.11774990858411193,
154
  "acc,all": 0.2880597014925373,
155
- "main_score": 0.11774990858411193
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.23100141738029784,
38
+ "all_grouped_npm": -0.166464107969747,
39
  "all_grouped": {
40
  "enem_challenge": 0.15185444366689993,
41
  "bluex": 0.18497913769123783,
42
  "oab_exams": 0.2214123006833713,
43
+ "assin2_rte": 0.5953496404707331,
44
  "assin2_sts": 0.011089425612064035,
45
+ "faquad_nli": 0.26732673267326734,
46
+ "hatebr_offensive": 0.3739763527924611,
47
+ "portuguese_hate_speech": 0.1160248447204969,
48
+ "tweetsentbr": 0.15699987811214924
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.15185444366689993,
52
  "harness|bluex|bluex|None|3": 0.18497913769123783,
53
  "harness|oab_exams|oab_exams|None|3": 0.2214123006833713,
54
+ "harness|assin2_rte|assin2_rte|None|15": 0.5953496404707331,
55
  "harness|assin2_sts|assin2_sts|None|15": 0.011089425612064035,
56
+ "harness|faquad_nli|faquad_nli|None|15": 0.26732673267326734,
57
+ "harness|hatebr_offensive|hatebr_offensive|None|25": 0.3739763527924611,
58
+ "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.1160248447204969,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.15699987811214924
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.15185444366689993,
 
125
  "main_score": 0.2214123006833713
126
  },
127
  "harness|assin2_rte|assin2_rte|None|15": {
128
+ "f1_macro,all": 0.5953496404707331,
129
  "acc,all": 0.5755718954248366,
130
+ "main_score": 0.5953496404707331
131
  },
132
  "harness|assin2_sts|assin2_sts|None|15": {
133
  "pearson,all": 0.011089425612064035,
 
135
  "main_score": 0.011089425612064035
136
  },
137
  "harness|faquad_nli|faquad_nli|None|15": {
138
+ "f1_macro,all": 0.26732673267326734,
139
  "acc,all": 0.3323076923076923,
140
+ "main_score": 0.26732673267326734
141
  },
142
  "harness|hatebr_offensive|hatebr_offensive|None|25": {
143
+ "f1_macro,all": 0.3739763527924611,
144
  "acc,all": 0.3842857142857143,
145
+ "main_score": 0.3739763527924611
146
  },
147
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": {
148
+ "f1_macro,all": 0.1160248447204969,
149
  "acc,all": 0.0846063454759107,
150
+ "main_score": 0.1160248447204969
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.15699987811214924,
154
  "acc,all": 0.2880597014925373,
155
+ "main_score": 0.15699987811214924
156
  }
157
  },
158
  "config_tasks": {
HuggingFaceTB/SmolLM-135M-Instruct/raw_2024-07-24T14-22-33.098781/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.26300477210440537,
5
- "acc,all": 0.2839052287581699,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.03362436337787243,
10
- "mse,all": 4.79703431372549,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.1627260083449235,
15
- "acc,exam_id__USP_2018": 0.1111111111111111,
16
- "acc,exam_id__USP_2021": 0.15384615384615385,
17
- "acc,exam_id__UNICAMP_2018": 0.18518518518518517,
18
- "acc,exam_id__UNICAMP_2020": 0.16363636363636364,
19
- "acc,exam_id__UNICAMP_2022": 0.20512820512820512,
20
- "acc,exam_id__UNICAMP_2024": 0.15555555555555556,
21
- "acc,exam_id__USP_2019": 0.225,
22
- "acc,exam_id__USP_2022": 0.1836734693877551,
23
- "acc,exam_id__USP_2023": 0.06818181818181818,
24
- "acc,exam_id__UNICAMP_2021_2": 0.0784313725490196,
25
- "acc,exam_id__UNICAMP_2023": 0.3023255813953488,
26
- "acc,exam_id__USP_2024": 0.07317073170731707,
27
- "acc,exam_id__USP_2020": 0.14285714285714285,
28
- "acc,exam_id__UNICAMP_2021_1": 0.2826086956521739,
29
- "acc,exam_id__UNICAMP_2019": 0.14,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.1728481455563331,
35
- "acc,exam_id__2014": 0.1651376146788991,
36
- "acc,exam_id__2015": 0.13445378151260504,
37
- "acc,exam_id__2013": 0.1574074074074074,
38
- "acc,exam_id__2009": 0.1565217391304348,
39
- "acc,exam_id__2022": 0.17293233082706766,
40
- "acc,exam_id__2017": 0.15517241379310345,
41
- "acc,exam_id__2010": 0.15384615384615385,
42
- "acc,exam_id__2012": 0.16379310344827586,
43
- "acc,exam_id__2016": 0.18181818181818182,
44
- "acc,exam_id__2011": 0.18803418803418803,
45
- "acc,exam_id__2016_2": 0.17073170731707318,
46
- "acc,exam_id__2023": 0.25925925925925924
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.07058169545364304,
50
- "acc,all": 0.08461538461538462,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.06650348469472238,
56
- "acc,all": 0.05857142857142857
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.22369020501138953,
60
- "acc,exam_id__2016-21": 0.2,
61
- "acc,exam_id__2018-25": 0.275,
62
- "acc,exam_id__2017-22": 0.25,
63
- "acc,exam_id__2012-08": 0.225,
64
- "acc,exam_id__2017-23": 0.2,
65
- "acc,exam_id__2013-12": 0.15,
66
- "acc,exam_id__2011-05": 0.2375,
67
- "acc,exam_id__2016-20": 0.2,
68
- "acc,exam_id__2014-15": 0.20512820512820512,
69
- "acc,exam_id__2012-06a": 0.2375,
70
- "acc,exam_id__2016-20a": 0.2875,
71
- "acc,exam_id__2014-13": 0.2375,
72
- "acc,exam_id__2013-10": 0.2125,
73
- "acc,exam_id__2012-09": 0.22077922077922077,
74
- "acc,exam_id__2015-16": 0.2375,
75
- "acc,exam_id__2011-03": 0.24242424242424243,
76
- "acc,exam_id__2013-11": 0.15,
77
- "acc,exam_id__2016-19": 0.1794871794871795,
78
- "acc,exam_id__2014-14": 0.25,
79
- "acc,exam_id__2010-01": 0.25882352941176473,
80
- "acc,exam_id__2012-07": 0.1375,
81
- "acc,exam_id__2017-24": 0.225,
82
- "acc,exam_id__2015-18": 0.25,
83
- "acc,exam_id__2011-04": 0.25,
84
- "acc,exam_id__2012-06": 0.2375,
85
- "acc,exam_id__2010-02": 0.23,
86
- "acc,exam_id__2015-17": 0.24358974358974358,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.07431443242652681,
92
- "acc,all": 0.07638072855464159
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.10296765823081612,
96
- "acc,all": 0.14378109452736318,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
208
  },
209
- {
210
- "function": "take_first"
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7f52406fd4e0>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7f52406fcea0>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
414
  },
415
- {
416
- "function": "take_first"
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f52406fd120>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7f52406fd6c0>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
503
  },
504
- {
505
- "function": "take_first"
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f52406fd940>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
624
  },
625
- {
626
- "function": "take_first"
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7f52406fc860>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
839
  },
840
- {
841
- "function": "take_first"
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7f52406fcae0>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
963
  },
964
- {
965
- "function": "take_first"
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
  },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
- },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
  },
1028
- {
1029
- "function": "take_first"
1030
  }
1031
- ]
1032
  }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
- }
1038
- }
1039
- },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
1050
- },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
1061
- },
1062
- "model_meta": {
1063
- "truncated": 1220,
1064
- "non_truncated": 12930,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 2128,
1068
- "has_chat_template": true,
1069
- "chat_type": "system_user_assistant",
1070
- "n_gpus": 1,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "0a0a7c2a1b1dc8f75f1d5a6ac86d38e3e7bab014",
1073
- "model_dtype": "torch.bfloat16",
1074
- "model_memory_footprint": 269033984,
1075
- "model_num_parameters": 134515008,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 1,
1081
- "max_length": 2048,
1082
- "max_ctx_length": 2016,
1083
- "max_gen_toks": 32
1084
- },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1618.1425653594772,
1094
- "min_seq_length": 1593,
1095
- "max_seq_length": 1692,
1096
- "max_ctx_length": 2016,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1844.1425653594772,
1109
- "min_seq_length": 1819,
1110
- "max_seq_length": 1918,
1111
- "max_ctx_length": 2016,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
  },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 205,
1119
- "non_truncated": 514,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 267,
1123
- "mean_seq_length": 1882.730180806676,
1124
- "min_seq_length": 1479,
1125
- "max_seq_length": 2749,
1126
- "max_ctx_length": 2016,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 2.6286509040333796
1130
  },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 162,
1134
- "non_truncated": 1267,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 170,
1138
- "mean_seq_length": 1803.6703988803358,
1139
- "min_seq_length": 1508,
1140
- "max_seq_length": 2796,
1141
- "max_ctx_length": 2016,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 2.881035689293212
1145
  },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1875.5692307692307,
1154
- "min_seq_length": 1815,
1155
- "max_seq_length": 2009,
1156
- "max_ctx_length": 2016,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
1160
  },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1568.8021428571428,
1169
- "min_seq_length": 1543,
1170
- "max_seq_length": 1844,
1171
- "max_ctx_length": 2016,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 2,
1179
- "non_truncated": 2193,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 2,
1183
- "mean_seq_length": 1567.8610478359908,
1184
- "min_seq_length": 1267,
1185
- "max_seq_length": 2116,
1186
- "max_ctx_length": 2016,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 2.9990888382687926
1190
  },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 851,
1194
- "non_truncated": 0,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 1689,
1198
- "mean_seq_length": 2138.0317273795536,
1199
- "min_seq_length": 2101,
1200
- "max_seq_length": 2172,
1201
- "max_ctx_length": 2016,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 23.015276145710928
1205
  },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1809.4268656716417,
1214
- "min_seq_length": 1788,
1215
- "max_seq_length": 1870,
1216
- "max_ctx_length": 2016,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
- },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=HuggingFaceTB/SmolLM-135M-Instruct,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
- },
1243
- "git_hash": "5a13f3e"
1244
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.3945071581566081,
5
+ "acc,all": 0.2839052287581699,
6
+ "alias": "assin2_rte"
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.03362436337787243,
10
+ "mse,all": 4.79703431372549,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.1627260083449235,
15
+ "acc,exam_id__USP_2018": 0.1111111111111111,
16
+ "acc,exam_id__USP_2021": 0.15384615384615385,
17
+ "acc,exam_id__UNICAMP_2018": 0.18518518518518517,
18
+ "acc,exam_id__UNICAMP_2020": 0.16363636363636364,
19
+ "acc,exam_id__UNICAMP_2022": 0.20512820512820512,
20
+ "acc,exam_id__UNICAMP_2024": 0.15555555555555556,
21
+ "acc,exam_id__USP_2019": 0.225,
22
+ "acc,exam_id__USP_2022": 0.1836734693877551,
23
+ "acc,exam_id__USP_2023": 0.06818181818181818,
24
+ "acc,exam_id__UNICAMP_2021_2": 0.0784313725490196,
25
+ "acc,exam_id__UNICAMP_2023": 0.3023255813953488,
26
+ "acc,exam_id__USP_2024": 0.07317073170731707,
27
+ "acc,exam_id__USP_2020": 0.14285714285714285,
28
+ "acc,exam_id__UNICAMP_2021_1": 0.2826086956521739,
29
+ "acc,exam_id__UNICAMP_2019": 0.14,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.1728481455563331,
35
+ "acc,exam_id__2014": 0.1651376146788991,
36
+ "acc,exam_id__2015": 0.13445378151260504,
37
+ "acc,exam_id__2013": 0.1574074074074074,
38
+ "acc,exam_id__2009": 0.1565217391304348,
39
+ "acc,exam_id__2022": 0.17293233082706766,
40
+ "acc,exam_id__2017": 0.15517241379310345,
41
+ "acc,exam_id__2010": 0.15384615384615385,
42
+ "acc,exam_id__2012": 0.16379310344827586,
43
+ "acc,exam_id__2016": 0.18181818181818182,
44
+ "acc,exam_id__2011": 0.18803418803418803,
45
+ "acc,exam_id__2016_2": 0.17073170731707318,
46
+ "acc,exam_id__2023": 0.25925925925925924
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.10587254318046457,
50
+ "acc,all": 0.08461538461538462,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.09975522704208357,
56
+ "acc,all": 0.05857142857142857
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.22369020501138953,
60
+ "acc,exam_id__2016-21": 0.2,
61
+ "acc,exam_id__2018-25": 0.275,
62
+ "acc,exam_id__2017-22": 0.25,
63
+ "acc,exam_id__2012-08": 0.225,
64
+ "acc,exam_id__2017-23": 0.2,
65
+ "acc,exam_id__2013-12": 0.15,
66
+ "acc,exam_id__2011-05": 0.2375,
67
+ "acc,exam_id__2016-20": 0.2,
68
+ "acc,exam_id__2014-15": 0.20512820512820512,
69
+ "acc,exam_id__2012-06a": 0.2375,
70
+ "acc,exam_id__2016-20a": 0.2875,
71
+ "acc,exam_id__2014-13": 0.2375,
72
+ "acc,exam_id__2013-10": 0.2125,
73
+ "acc,exam_id__2012-09": 0.22077922077922077,
74
+ "acc,exam_id__2015-16": 0.2375,
75
+ "acc,exam_id__2011-03": 0.24242424242424243,
76
+ "acc,exam_id__2013-11": 0.15,
77
+ "acc,exam_id__2016-19": 0.1794871794871795,
78
+ "acc,exam_id__2014-14": 0.25,
79
+ "acc,exam_id__2010-01": 0.25882352941176473,
80
+ "acc,exam_id__2012-07": 0.1375,
81
+ "acc,exam_id__2017-24": 0.225,
82
+ "acc,exam_id__2015-18": 0.25,
83
+ "acc,exam_id__2011-04": 0.25,
84
+ "acc,exam_id__2012-06": 0.2375,
85
+ "acc,exam_id__2010-02": 0.23,
86
+ "acc,exam_id__2015-17": 0.24358974358974358,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.11147164863979023,
92
+ "acc,all": 0.07638072855464159
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.1372902109744215,
96
+ "acc,all": 0.14378109452736318,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7f52406fd4e0>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7f52406fcea0>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f52406fd120>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7f52406fd6c0>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f52406fd940>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7f52406fc860>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7f52406fcae0>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
990
  },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
+ },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
+ },
1028
+ {
1029
+ "function": "take_first"
1030
+ }
1031
+ ]
1032
+ }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
  }
 
1038
  }
 
1039
  },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
 
 
 
 
1050
  },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
 
 
 
 
1061
  },
1062
+ "model_meta": {
1063
+ "truncated": 1220,
1064
+ "non_truncated": 12930,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 2128,
1068
+ "has_chat_template": true,
1069
+ "chat_type": "system_user_assistant",
1070
+ "n_gpus": 1,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "0a0a7c2a1b1dc8f75f1d5a6ac86d38e3e7bab014",
1073
+ "model_dtype": "torch.bfloat16",
1074
+ "model_memory_footprint": 269033984,
1075
+ "model_num_parameters": 134515008,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 1,
1081
+ "max_length": 2048,
1082
+ "max_ctx_length": 2016,
1083
+ "max_gen_toks": 32
1084
  },
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1618.1425653594772,
1094
+ "min_seq_length": 1593,
1095
+ "max_seq_length": 1692,
1096
+ "max_ctx_length": 2016,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
+ },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1844.1425653594772,
1109
+ "min_seq_length": 1819,
1110
+ "max_seq_length": 1918,
1111
+ "max_ctx_length": 2016,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
+ },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 205,
1119
+ "non_truncated": 514,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 267,
1123
+ "mean_seq_length": 1882.730180806676,
1124
+ "min_seq_length": 1479,
1125
+ "max_seq_length": 2749,
1126
+ "max_ctx_length": 2016,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 2.6286509040333796
1130
+ },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 162,
1134
+ "non_truncated": 1267,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 170,
1138
+ "mean_seq_length": 1803.6703988803358,
1139
+ "min_seq_length": 1508,
1140
+ "max_seq_length": 2796,
1141
+ "max_ctx_length": 2016,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 2.881035689293212
1145
+ },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1875.5692307692307,
1154
+ "min_seq_length": 1815,
1155
+ "max_seq_length": 2009,
1156
+ "max_ctx_length": 2016,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1568.8021428571428,
1169
+ "min_seq_length": 1543,
1170
+ "max_seq_length": 1844,
1171
+ "max_ctx_length": 2016,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 2,
1179
+ "non_truncated": 2193,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 2,
1183
+ "mean_seq_length": 1567.8610478359908,
1184
+ "min_seq_length": 1267,
1185
+ "max_seq_length": 2116,
1186
+ "max_ctx_length": 2016,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 2.9990888382687926
1190
+ },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 851,
1194
+ "non_truncated": 0,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 1689,
1198
+ "mean_seq_length": 2138.0317273795536,
1199
+ "min_seq_length": 2101,
1200
+ "max_seq_length": 2172,
1201
+ "max_ctx_length": 2016,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 23.015276145710928
1205
+ },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1809.4268656716417,
1214
+ "min_seq_length": 1788,
1215
+ "max_seq_length": 1870,
1216
+ "max_ctx_length": 2016,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
  },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=HuggingFaceTB/SmolLM-135M-Instruct,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
  },
1243
+ "git_hash": "5a13f3e"
1244
  }
HuggingFaceTB/SmolLM-135M-Instruct/results_2024-07-24T14-22-33.098781.json CHANGED
@@ -34,29 +34,29 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.13002897391118137,
38
- "all_grouped_npm": -0.3640932247152165,
39
  "all_grouped": {
40
  "enem_challenge": 0.1728481455563331,
41
  "bluex": 0.1627260083449235,
42
  "oab_exams": 0.22369020501138953,
43
- "assin2_rte": 0.26300477210440537,
44
  "assin2_sts": 0.03362436337787243,
45
- "faquad_nli": 0.07058169545364304,
46
- "hatebr_offensive": 0.06650348469472238,
47
- "portuguese_hate_speech": 0.07431443242652681,
48
- "tweetsentbr": 0.10296765823081612
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.1728481455563331,
52
  "harness|bluex|bluex|None|3": 0.1627260083449235,
53
  "harness|oab_exams|oab_exams|None|3": 0.22369020501138953,
54
- "harness|assin2_rte|assin2_rte|None|15": 0.26300477210440537,
55
  "harness|assin2_sts|assin2_sts|None|15": 0.03362436337787243,
56
- "harness|faquad_nli|faquad_nli|None|15": 0.07058169545364304,
57
- "harness|hatebr_offensive|hatebr_offensive|None|25": 0.06650348469472238,
58
- "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.07431443242652681,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.10296765823081612
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.1728481455563331,
@@ -125,9 +125,9 @@
125
  "main_score": 0.22369020501138953
126
  },
127
  "harness|assin2_rte|assin2_rte|None|15": {
128
- "f1_macro,all": 0.26300477210440537,
129
  "acc,all": 0.2839052287581699,
130
- "main_score": 0.26300477210440537
131
  },
132
  "harness|assin2_sts|assin2_sts|None|15": {
133
  "pearson,all": 0.03362436337787243,
@@ -135,24 +135,24 @@
135
  "main_score": 0.03362436337787243
136
  },
137
  "harness|faquad_nli|faquad_nli|None|15": {
138
- "f1_macro,all": 0.07058169545364304,
139
  "acc,all": 0.08461538461538462,
140
- "main_score": 0.07058169545364304
141
  },
142
  "harness|hatebr_offensive|hatebr_offensive|None|25": {
143
- "f1_macro,all": 0.06650348469472238,
144
  "acc,all": 0.05857142857142857,
145
- "main_score": 0.06650348469472238
146
  },
147
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": {
148
- "f1_macro,all": 0.07431443242652681,
149
  "acc,all": 0.07638072855464159,
150
- "main_score": 0.07431443242652681
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.10296765823081612,
154
  "acc,all": 0.14378109452736318,
155
- "main_score": 0.10296765823081612
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.16019839003154296,
38
+ "all_grouped_npm": -0.3066737359390134,
39
  "all_grouped": {
40
  "enem_challenge": 0.1728481455563331,
41
  "bluex": 0.1627260083449235,
42
  "oab_exams": 0.22369020501138953,
43
+ "assin2_rte": 0.3945071581566081,
44
  "assin2_sts": 0.03362436337787243,
45
+ "faquad_nli": 0.10587254318046457,
46
+ "hatebr_offensive": 0.09975522704208357,
47
+ "portuguese_hate_speech": 0.11147164863979023,
48
+ "tweetsentbr": 0.1372902109744215
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.1728481455563331,
52
  "harness|bluex|bluex|None|3": 0.1627260083449235,
53
  "harness|oab_exams|oab_exams|None|3": 0.22369020501138953,
54
+ "harness|assin2_rte|assin2_rte|None|15": 0.3945071581566081,
55
  "harness|assin2_sts|assin2_sts|None|15": 0.03362436337787243,
56
+ "harness|faquad_nli|faquad_nli|None|15": 0.10587254318046457,
57
+ "harness|hatebr_offensive|hatebr_offensive|None|25": 0.09975522704208357,
58
+ "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.11147164863979023,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.1372902109744215
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.1728481455563331,
 
125
  "main_score": 0.22369020501138953
126
  },
127
  "harness|assin2_rte|assin2_rte|None|15": {
128
+ "f1_macro,all": 0.3945071581566081,
129
  "acc,all": 0.2839052287581699,
130
+ "main_score": 0.3945071581566081
131
  },
132
  "harness|assin2_sts|assin2_sts|None|15": {
133
  "pearson,all": 0.03362436337787243,
 
135
  "main_score": 0.03362436337787243
136
  },
137
  "harness|faquad_nli|faquad_nli|None|15": {
138
+ "f1_macro,all": 0.10587254318046457,
139
  "acc,all": 0.08461538461538462,
140
+ "main_score": 0.10587254318046457
141
  },
142
  "harness|hatebr_offensive|hatebr_offensive|None|25": {
143
+ "f1_macro,all": 0.09975522704208357,
144
  "acc,all": 0.05857142857142857,
145
+ "main_score": 0.09975522704208357
146
  },
147
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": {
148
+ "f1_macro,all": 0.11147164863979023,
149
  "acc,all": 0.07638072855464159,
150
+ "main_score": 0.11147164863979023
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.1372902109744215,
154
  "acc,all": 0.14378109452736318,
155
+ "main_score": 0.1372902109744215
156
  }
157
  },
158
  "config_tasks": {
HuggingFaceTB/SmolLM-360M-Instruct/raw_2024-07-24T17-08-31.061263/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.3333333333333333,
5
- "acc,all": 0.5,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.036062757273046704,
10
- "mse,all": 2.5509232026143787,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.16689847009735745,
15
- "acc,exam_id__USP_2018": 0.14814814814814814,
16
- "acc,exam_id__USP_2021": 0.15384615384615385,
17
- "acc,exam_id__UNICAMP_2018": 0.14814814814814814,
18
- "acc,exam_id__UNICAMP_2020": 0.14545454545454545,
19
- "acc,exam_id__UNICAMP_2022": 0.20512820512820512,
20
- "acc,exam_id__UNICAMP_2024": 0.15555555555555556,
21
- "acc,exam_id__USP_2019": 0.225,
22
- "acc,exam_id__USP_2022": 0.22448979591836735,
23
- "acc,exam_id__USP_2023": 0.045454545454545456,
24
- "acc,exam_id__UNICAMP_2021_2": 0.11764705882352941,
25
- "acc,exam_id__UNICAMP_2023": 0.3023255813953488,
26
- "acc,exam_id__USP_2024": 0.07317073170731707,
27
- "acc,exam_id__USP_2020": 0.17857142857142858,
28
- "acc,exam_id__UNICAMP_2021_1": 0.2608695652173913,
29
- "acc,exam_id__UNICAMP_2019": 0.14,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.17984604618614417,
35
- "acc,exam_id__2014": 0.1926605504587156,
36
- "acc,exam_id__2015": 0.13445378151260504,
37
- "acc,exam_id__2013": 0.1574074074074074,
38
- "acc,exam_id__2009": 0.14782608695652175,
39
- "acc,exam_id__2022": 0.18796992481203006,
40
- "acc,exam_id__2017": 0.19827586206896552,
41
- "acc,exam_id__2010": 0.13675213675213677,
42
- "acc,exam_id__2012": 0.1810344827586207,
43
- "acc,exam_id__2016": 0.19834710743801653,
44
- "acc,exam_id__2011": 0.1794871794871795,
45
- "acc,exam_id__2016_2": 0.17886178861788618,
46
- "acc,exam_id__2023": 0.2518518518518518
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.09262250942380183,
50
- "acc,all": 0.13230769230769232,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.23490637157803498,
56
- "acc,all": 0.3407142857142857
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.23006833712984054,
60
- "acc,exam_id__2016-21": 0.2125,
61
- "acc,exam_id__2018-25": 0.2875,
62
- "acc,exam_id__2017-22": 0.25,
63
- "acc,exam_id__2012-08": 0.225,
64
- "acc,exam_id__2017-23": 0.2125,
65
- "acc,exam_id__2013-12": 0.175,
66
- "acc,exam_id__2011-05": 0.2375,
67
- "acc,exam_id__2016-20": 0.225,
68
- "acc,exam_id__2014-15": 0.21794871794871795,
69
- "acc,exam_id__2012-06a": 0.2375,
70
- "acc,exam_id__2016-20a": 0.3,
71
- "acc,exam_id__2014-13": 0.2375,
72
- "acc,exam_id__2013-10": 0.2125,
73
- "acc,exam_id__2012-09": 0.23376623376623376,
74
- "acc,exam_id__2015-16": 0.2375,
75
- "acc,exam_id__2011-03": 0.24242424242424243,
76
- "acc,exam_id__2013-11": 0.1625,
77
- "acc,exam_id__2016-19": 0.19230769230769232,
78
- "acc,exam_id__2014-14": 0.2625,
79
- "acc,exam_id__2010-01": 0.25882352941176473,
80
- "acc,exam_id__2012-07": 0.1375,
81
- "acc,exam_id__2017-24": 0.225,
82
- "acc,exam_id__2015-18": 0.25,
83
- "acc,exam_id__2011-04": 0.25,
84
- "acc,exam_id__2012-06": 0.2375,
85
- "acc,exam_id__2010-02": 0.24,
86
- "acc,exam_id__2015-17": 0.24358974358974358,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.1945811604219462,
92
- "acc,all": 0.20681551116333724
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.12400834046013225,
96
- "acc,all": 0.2975124378109453,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
 
208
  },
209
- {
210
- "function": "take_first"
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7f52406fd4e0>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7f52406fcea0>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
414
  },
415
- {
416
- "function": "take_first"
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f52406fd120>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7f52406fd6c0>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
503
  },
504
- {
505
- "function": "take_first"
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f52406fd940>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
624
  },
625
- {
626
- "function": "take_first"
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7f52406fc860>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
839
  },
840
- {
841
- "function": "take_first"
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7f52406fcae0>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
963
  },
964
- {
965
- "function": "take_first"
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
  },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
- },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
  },
1028
- {
1029
- "function": "take_first"
1030
  }
1031
- ]
1032
  }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
- }
1038
- }
1039
- },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
1050
- },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
1061
- },
1062
- "model_meta": {
1063
- "truncated": 1220,
1064
- "non_truncated": 12930,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 2128,
1068
- "has_chat_template": true,
1069
- "chat_type": "system_user_assistant",
1070
- "n_gpus": 1,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "61ee8dc97f5b99b609255a3b6091cbc0023c7692",
1073
- "model_dtype": "torch.bfloat16",
1074
- "model_memory_footprint": 723646464,
1075
- "model_num_parameters": 361821120,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 1,
1081
- "max_length": 2048,
1082
- "max_ctx_length": 2016,
1083
- "max_gen_toks": 32
1084
- },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1618.1425653594772,
1094
- "min_seq_length": 1593,
1095
- "max_seq_length": 1692,
1096
- "max_ctx_length": 2016,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1844.1425653594772,
1109
- "min_seq_length": 1819,
1110
- "max_seq_length": 1918,
1111
- "max_ctx_length": 2016,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
  },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 205,
1119
- "non_truncated": 514,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 267,
1123
- "mean_seq_length": 1882.730180806676,
1124
- "min_seq_length": 1479,
1125
- "max_seq_length": 2749,
1126
- "max_ctx_length": 2016,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 2.6286509040333796
1130
  },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 162,
1134
- "non_truncated": 1267,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 170,
1138
- "mean_seq_length": 1803.6703988803358,
1139
- "min_seq_length": 1508,
1140
- "max_seq_length": 2796,
1141
- "max_ctx_length": 2016,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 2.881035689293212
1145
  },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1875.5692307692307,
1154
- "min_seq_length": 1815,
1155
- "max_seq_length": 2009,
1156
- "max_ctx_length": 2016,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
1160
  },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1568.8021428571428,
1169
- "min_seq_length": 1543,
1170
- "max_seq_length": 1844,
1171
- "max_ctx_length": 2016,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 2,
1179
- "non_truncated": 2193,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 2,
1183
- "mean_seq_length": 1567.8610478359908,
1184
- "min_seq_length": 1267,
1185
- "max_seq_length": 2116,
1186
- "max_ctx_length": 2016,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 2.9990888382687926
1190
  },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 851,
1194
- "non_truncated": 0,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 1689,
1198
- "mean_seq_length": 2138.0317273795536,
1199
- "min_seq_length": 2101,
1200
- "max_seq_length": 2172,
1201
- "max_ctx_length": 2016,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 23.015276145710928
1205
  },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1809.4268656716417,
1214
- "min_seq_length": 1788,
1215
- "max_seq_length": 1870,
1216
- "max_ctx_length": 2016,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
- },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=HuggingFaceTB/SmolLM-360M-Instruct,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
- },
1243
- "git_hash": "5a13f3e"
1244
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.3333333333333333,
5
+ "acc,all": 0.5,
6
+ "alias": "assin2_rte"
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.036062757273046704,
10
+ "mse,all": 2.5509232026143787,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.16689847009735745,
15
+ "acc,exam_id__USP_2018": 0.14814814814814814,
16
+ "acc,exam_id__USP_2021": 0.15384615384615385,
17
+ "acc,exam_id__UNICAMP_2018": 0.14814814814814814,
18
+ "acc,exam_id__UNICAMP_2020": 0.14545454545454545,
19
+ "acc,exam_id__UNICAMP_2022": 0.20512820512820512,
20
+ "acc,exam_id__UNICAMP_2024": 0.15555555555555556,
21
+ "acc,exam_id__USP_2019": 0.225,
22
+ "acc,exam_id__USP_2022": 0.22448979591836735,
23
+ "acc,exam_id__USP_2023": 0.045454545454545456,
24
+ "acc,exam_id__UNICAMP_2021_2": 0.11764705882352941,
25
+ "acc,exam_id__UNICAMP_2023": 0.3023255813953488,
26
+ "acc,exam_id__USP_2024": 0.07317073170731707,
27
+ "acc,exam_id__USP_2020": 0.17857142857142858,
28
+ "acc,exam_id__UNICAMP_2021_1": 0.2608695652173913,
29
+ "acc,exam_id__UNICAMP_2019": 0.14,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.17984604618614417,
35
+ "acc,exam_id__2014": 0.1926605504587156,
36
+ "acc,exam_id__2015": 0.13445378151260504,
37
+ "acc,exam_id__2013": 0.1574074074074074,
38
+ "acc,exam_id__2009": 0.14782608695652175,
39
+ "acc,exam_id__2022": 0.18796992481203006,
40
+ "acc,exam_id__2017": 0.19827586206896552,
41
+ "acc,exam_id__2010": 0.13675213675213677,
42
+ "acc,exam_id__2012": 0.1810344827586207,
43
+ "acc,exam_id__2016": 0.19834710743801653,
44
+ "acc,exam_id__2011": 0.1794871794871795,
45
+ "acc,exam_id__2016_2": 0.17886178861788618,
46
+ "acc,exam_id__2023": 0.2518518518518518
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.13893376413570277,
50
+ "acc,all": 0.13230769230769232,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.3523595573670525,
56
+ "acc,all": 0.3407142857142857
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.23006833712984054,
60
+ "acc,exam_id__2016-21": 0.2125,
61
+ "acc,exam_id__2018-25": 0.2875,
62
+ "acc,exam_id__2017-22": 0.25,
63
+ "acc,exam_id__2012-08": 0.225,
64
+ "acc,exam_id__2017-23": 0.2125,
65
+ "acc,exam_id__2013-12": 0.175,
66
+ "acc,exam_id__2011-05": 0.2375,
67
+ "acc,exam_id__2016-20": 0.225,
68
+ "acc,exam_id__2014-15": 0.21794871794871795,
69
+ "acc,exam_id__2012-06a": 0.2375,
70
+ "acc,exam_id__2016-20a": 0.3,
71
+ "acc,exam_id__2014-13": 0.2375,
72
+ "acc,exam_id__2013-10": 0.2125,
73
+ "acc,exam_id__2012-09": 0.23376623376623376,
74
+ "acc,exam_id__2015-16": 0.2375,
75
+ "acc,exam_id__2011-03": 0.24242424242424243,
76
+ "acc,exam_id__2013-11": 0.1625,
77
+ "acc,exam_id__2016-19": 0.19230769230769232,
78
+ "acc,exam_id__2014-14": 0.2625,
79
+ "acc,exam_id__2010-01": 0.25882352941176473,
80
+ "acc,exam_id__2012-07": 0.1375,
81
+ "acc,exam_id__2017-24": 0.225,
82
+ "acc,exam_id__2015-18": 0.25,
83
+ "acc,exam_id__2011-04": 0.25,
84
+ "acc,exam_id__2012-06": 0.2375,
85
+ "acc,exam_id__2010-02": 0.24,
86
+ "acc,exam_id__2015-17": 0.24358974358974358,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.2918717406329194,
92
+ "acc,all": 0.20681551116333724
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.16534445394684297,
96
+ "acc,all": 0.2975124378109453,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7f52406fd4e0>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7f52406fcea0>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f52406fd120>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7f52406fd6c0>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f52406fd940>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7f52406fc860>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7f52406fcae0>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
990
  },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
+ },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
+ },
1028
+ {
1029
+ "function": "take_first"
1030
+ }
1031
+ ]
1032
+ }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
  }
 
1038
  }
1039
  },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
1050
  },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
1061
  },
1062
+ "model_meta": {
1063
+ "truncated": 1220,
1064
+ "non_truncated": 12930,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 2128,
1068
+ "has_chat_template": true,
1069
+ "chat_type": "system_user_assistant",
1070
+ "n_gpus": 1,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "61ee8dc97f5b99b609255a3b6091cbc0023c7692",
1073
+ "model_dtype": "torch.bfloat16",
1074
+ "model_memory_footprint": 723646464,
1075
+ "model_num_parameters": 361821120,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 1,
1081
+ "max_length": 2048,
1082
+ "max_ctx_length": 2016,
1083
+ "max_gen_toks": 32
1084
  },
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1618.1425653594772,
1094
+ "min_seq_length": 1593,
1095
+ "max_seq_length": 1692,
1096
+ "max_ctx_length": 2016,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
+ },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1844.1425653594772,
1109
+ "min_seq_length": 1819,
1110
+ "max_seq_length": 1918,
1111
+ "max_ctx_length": 2016,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
+ },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 205,
1119
+ "non_truncated": 514,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 267,
1123
+ "mean_seq_length": 1882.730180806676,
1124
+ "min_seq_length": 1479,
1125
+ "max_seq_length": 2749,
1126
+ "max_ctx_length": 2016,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 2.6286509040333796
1130
+ },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 162,
1134
+ "non_truncated": 1267,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 170,
1138
+ "mean_seq_length": 1803.6703988803358,
1139
+ "min_seq_length": 1508,
1140
+ "max_seq_length": 2796,
1141
+ "max_ctx_length": 2016,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 2.881035689293212
1145
+ },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1875.5692307692307,
1154
+ "min_seq_length": 1815,
1155
+ "max_seq_length": 2009,
1156
+ "max_ctx_length": 2016,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1568.8021428571428,
1169
+ "min_seq_length": 1543,
1170
+ "max_seq_length": 1844,
1171
+ "max_ctx_length": 2016,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 2,
1179
+ "non_truncated": 2193,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 2,
1183
+ "mean_seq_length": 1567.8610478359908,
1184
+ "min_seq_length": 1267,
1185
+ "max_seq_length": 2116,
1186
+ "max_ctx_length": 2016,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 2.9990888382687926
1190
+ },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 851,
1194
+ "non_truncated": 0,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 1689,
1198
+ "mean_seq_length": 2138.0317273795536,
1199
+ "min_seq_length": 2101,
1200
+ "max_seq_length": 2172,
1201
+ "max_ctx_length": 2016,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 23.015276145710928
1205
+ },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1809.4268656716417,
1214
+ "min_seq_length": 1788,
1215
+ "max_seq_length": 1870,
1216
+ "max_ctx_length": 2016,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
  },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=HuggingFaceTB/SmolLM-360M-Instruct,dtype=bfloat16,parallelize=True,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
  },
1243
+ "git_hash": "5a13f3e"
1244
  }
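The summary files below show each affected task's "f1_macro,all" value changing while "acc,all" stays fixed, which is consistent with a re-aggregation of the macro-averaged F1 alone. As a minimal sketch (not the harness's actual code), assuming the filtered generations have already been mapped onto the task labels by the "find_similar_label" filter, a macro F1 for a binary "Sim"/"Não" task such as hatebr_offensive can be recomputed with scikit-learn:

    # Hypothetical example data; only the metric call mirrors the config above.
    from sklearn.metrics import accuracy_score, f1_score

    gold = ["Sim", "Não", "Não", "Sim", "Não"]   # references
    pred = ["Sim", "Não", "Sim", "Sim", "Não"]   # filtered model outputs

    acc = accuracy_score(gold, pred)
    # Macro F1 averages the per-class F1 scores with equal weight per class,
    # so it can shift even when accuracy is unchanged.
    f1_macro = f1_score(gold, pred, average="macro", labels=["Sim", "Não"])

    print(f"acc,all: {acc:.4f}")
    print(f"f1_macro,all: {f1_macro:.4f}")

With the "Positivo"/"Neutro"/"Negativo" labels of tweetsentbr the same call applies, just with three classes in the labels list.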
HuggingFaceTB/SmolLM-360M-Instruct/results_2024-07-24T17-08-31.061263.json CHANGED
@@ -34,18 +34,18 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.1769252584337375,
38
- "all_grouped_npm": -0.27462637422910463,
39
  "all_grouped": {
40
  "enem_challenge": 0.17984604618614417,
41
  "bluex": 0.16689847009735745,
42
  "oab_exams": 0.23006833712984054,
43
  "assin2_rte": 0.3333333333333333,
44
  "assin2_sts": 0.036062757273046704,
45
- "faquad_nli": 0.09262250942380183,
46
- "hatebr_offensive": 0.23490637157803498,
47
- "portuguese_hate_speech": 0.1945811604219462,
48
- "tweetsentbr": 0.12400834046013225
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.17984604618614417,
@@ -53,10 +53,10 @@
53
  "harness|oab_exams|oab_exams|None|3": 0.23006833712984054,
54
  "harness|assin2_rte|assin2_rte|None|15": 0.3333333333333333,
55
  "harness|assin2_sts|assin2_sts|None|15": 0.036062757273046704,
56
- "harness|faquad_nli|faquad_nli|None|15": 0.09262250942380183,
57
- "harness|hatebr_offensive|hatebr_offensive|None|25": 0.23490637157803498,
58
- "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.1945811604219462,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.12400834046013225
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.17984604618614417,
@@ -135,24 +135,24 @@
135
  "main_score": 0.036062757273046704
136
  },
137
  "harness|faquad_nli|faquad_nli|None|15": {
138
- "f1_macro,all": 0.09262250942380183,
139
  "acc,all": 0.13230769230769232,
140
- "main_score": 0.09262250942380183
141
  },
142
  "harness|hatebr_offensive|hatebr_offensive|None|25": {
143
- "f1_macro,all": 0.23490637157803498,
144
  "acc,all": 0.3407142857142857,
145
- "main_score": 0.23490637157803498
146
  },
147
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": {
148
- "f1_macro,all": 0.1945811604219462,
149
  "acc,all": 0.20681551116333724,
150
- "main_score": 0.1945811604219462
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.12400834046013225,
154
  "acc,all": 0.2975124378109453,
155
- "main_score": 0.12400834046013225
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.21052427334469329,
38
+ "all_grouped_npm": -0.21148330912467844,
39
  "all_grouped": {
40
  "enem_challenge": 0.17984604618614417,
41
  "bluex": 0.16689847009735745,
42
  "oab_exams": 0.23006833712984054,
43
  "assin2_rte": 0.3333333333333333,
44
  "assin2_sts": 0.036062757273046704,
45
+ "faquad_nli": 0.13893376413570277,
46
+ "hatebr_offensive": 0.3523595573670525,
47
+ "portuguese_hate_speech": 0.2918717406329194,
48
+ "tweetsentbr": 0.16534445394684297
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.17984604618614417,
 
53
  "harness|oab_exams|oab_exams|None|3": 0.23006833712984054,
54
  "harness|assin2_rte|assin2_rte|None|15": 0.3333333333333333,
55
  "harness|assin2_sts|assin2_sts|None|15": 0.036062757273046704,
56
+ "harness|faquad_nli|faquad_nli|None|15": 0.13893376413570277,
57
+ "harness|hatebr_offensive|hatebr_offensive|None|25": 0.3523595573670525,
58
+ "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.2918717406329194,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.16534445394684297
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.17984604618614417,
 
135
  "main_score": 0.036062757273046704
136
  },
137
  "harness|faquad_nli|faquad_nli|None|15": {
138
+ "f1_macro,all": 0.13893376413570277,
139
  "acc,all": 0.13230769230769232,
140
+ "main_score": 0.13893376413570277
141
  },
142
  "harness|hatebr_offensive|hatebr_offensive|None|25": {
143
+ "f1_macro,all": 0.3523595573670525,
144
  "acc,all": 0.3407142857142857,
145
+ "main_score": 0.3523595573670525
146
  },
147
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": {
148
+ "f1_macro,all": 0.2918717406329194,
149
  "acc,all": 0.20681551116333724,
150
+ "main_score": 0.2918717406329194
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.16534445394684297,
154
  "acc,all": 0.2975124378109453,
155
+ "main_score": 0.16534445394684297
156
  }
157
  },
158
  "config_tasks": {
Intel/neural-chat-7b-v3-1/raw_2024-02-25T06-21-33.008420/results.json CHANGED
@@ -1,1324 +1,1324 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.9268770228292367,
5
- "acc,all": 0.9268790849673203,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.7658477385894799,
10
- "mse,all": 0.5124264705882353,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.47983310152990266,
15
- "acc,exam_id__USP_2019": 0.4,
16
- "acc,exam_id__USP_2022": 0.46938775510204084,
17
- "acc,exam_id__USP_2023": 0.5909090909090909,
18
- "acc,exam_id__UNICAMP_2018": 0.4074074074074074,
19
- "acc,exam_id__UNICAMP_2019": 0.54,
20
- "acc,exam_id__USP_2020": 0.48214285714285715,
21
- "acc,exam_id__UNICAMP_2020": 0.43636363636363634,
22
- "acc,exam_id__UNICAMP_2023": 0.5348837209302325,
23
- "acc,exam_id__USP_2021": 0.4423076923076923,
24
- "acc,exam_id__UNICAMP_2022": 0.5897435897435898,
25
- "acc,exam_id__UNICAMP_2024": 0.5111111111111111,
26
- "acc,exam_id__USP_2018": 0.37037037037037035,
27
- "acc,exam_id__UNICAMP_2021_2": 0.45098039215686275,
28
- "acc,exam_id__UNICAMP_2021_1": 0.45652173913043476,
29
- "acc,exam_id__USP_2024": 0.5853658536585366,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.6263121063680895,
35
- "acc,exam_id__2013": 0.6388888888888888,
36
- "acc,exam_id__2016_2": 0.6666666666666666,
37
- "acc,exam_id__2016": 0.5619834710743802,
38
- "acc,exam_id__2011": 0.6837606837606838,
39
- "acc,exam_id__2017": 0.5948275862068966,
40
- "acc,exam_id__2023": 0.6814814814814815,
41
- "acc,exam_id__2014": 0.6513761467889908,
42
- "acc,exam_id__2012": 0.6379310344827587,
43
- "acc,exam_id__2009": 0.6,
44
- "acc,exam_id__2015": 0.5966386554621849,
45
- "acc,exam_id__2022": 0.6165413533834586,
46
- "acc,exam_id__2010": 0.5811965811965812
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.7840135895978708,
50
- "acc,all": 0.8353846153846154,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.8905574366528357,
56
- "acc,all": 0.8907142857142857
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.39726651480637815,
60
- "acc,exam_id__2016-20a": 0.3875,
61
- "acc,exam_id__2012-06": 0.4125,
62
- "acc,exam_id__2015-18": 0.4,
63
- "acc,exam_id__2014-14": 0.475,
64
- "acc,exam_id__2012-07": 0.3875,
65
- "acc,exam_id__2015-16": 0.3875,
66
- "acc,exam_id__2011-05": 0.4375,
67
- "acc,exam_id__2012-06a": 0.425,
68
- "acc,exam_id__2017-23": 0.35,
69
- "acc,exam_id__2016-19": 0.4230769230769231,
70
- "acc,exam_id__2017-24": 0.35,
71
- "acc,exam_id__2016-20": 0.375,
72
- "acc,exam_id__2017-22": 0.475,
73
- "acc,exam_id__2013-12": 0.45,
74
- "acc,exam_id__2010-02": 0.36,
75
- "acc,exam_id__2011-03": 0.3333333333333333,
76
- "acc,exam_id__2012-08": 0.425,
77
- "acc,exam_id__2013-10": 0.4125,
78
- "acc,exam_id__2016-21": 0.4125,
79
- "acc,exam_id__2014-15": 0.44871794871794873,
80
- "acc,exam_id__2018-25": 0.425,
81
- "acc,exam_id__2014-13": 0.275,
82
- "acc,exam_id__2010-01": 0.3764705882352941,
83
- "acc,exam_id__2015-17": 0.5,
84
- "acc,exam_id__2013-11": 0.4,
85
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
208
  },
209
- {
210
- "function": "take_first"
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7fb393d55120>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7fb393d54ae0>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
414
  },
415
- {
416
- "function": "take_first"
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fb393d54d60>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7fb393d55300>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
503
  },
504
- {
505
- "function": "take_first"
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fb393d55580>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
624
  },
625
- {
626
- "function": "take_first"
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7fb393d544a0>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
839
  },
840
- {
841
- "function": "take_first"
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7fb393d54720>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
963
  },
964
- {
965
- "function": "take_first"
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia-temp/tweetsentbr",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "id_sampler",
990
- "sampler_config": {
991
- "id_list": [
992
- "862006098672459776",
993
- "861612241703063552",
994
- "861833257087848448",
995
- "861283345476571138",
996
- "861283000335695873",
997
- "862139461274152962",
998
- "862139468702265344",
999
- "862006107702734848",
1000
- "862004354458537984",
1001
- "861833322925883392",
1002
- "861603063190171648",
1003
- "862139462716989440",
1004
- "862005877355810818",
1005
- "861751885862244353",
1006
- "862045180261695489",
1007
- "862004252499226630",
1008
- "862023970828292097",
1009
- "862041752127107074",
1010
- "862034961863503872",
1011
- "861293756548608001",
1012
- "861993527575695360",
1013
- "862003099355021315",
1014
- "862002404086206467",
1015
- "861282989602463744",
1016
- "862139454399668229",
1017
- "862139463769743361",
1018
- "862054906689138688",
1019
- "862139446535360513",
1020
- "861997363744911361",
1021
- "862057988898648065",
1022
- "861329080083521536",
1023
- "861286289034838016",
1024
- "861833050526806017",
1025
- "861300658565255169",
1026
- "861989003821813760",
1027
- "861682750398631938",
1028
- "861283275716907008",
1029
- "861283402523267072",
1030
- "861873108147466240",
1031
- "862139462138171392",
1032
- "861284090271715333",
1033
- "862139446149427201",
1034
- "861629109331525633",
1035
- "861721698609098753",
1036
- "862139453124612096",
1037
- "861283339482914816",
1038
- "861282466291748867",
1039
- "862055346759749632",
1040
- "862003019860389891",
1041
- "862140698346344449",
1042
- "862084376280092672",
1043
- "862003058708017152",
1044
- "862000677345787904",
1045
- "862029129310502913",
1046
- "862005822376882178",
1047
- "861969836297134085",
1048
- "861302955361927168",
1049
- "862064949451005953",
1050
- "861282589541355520",
1051
- "862005476858486784",
1052
- "862004684411850757",
1053
- "862139471101349890",
1054
- "862139467146170368",
1055
- "862139475098558465",
1056
- "862140706550403072",
1057
- "861282777001537536",
1058
- "862003184147079169",
1059
- "861283410656059394",
1060
- "861283417857691649",
1061
- "861888778922856448",
1062
- "861655860812099585",
1063
- "861834248063504384",
1064
- "862005210935382017",
1065
- "861282716930760704",
1066
- "861287082433622022"
1067
- ],
1068
- "id_column": "id"
1069
- }
1070
- },
1071
- "num_fewshot": 25,
1072
- "metric_list": [
1073
- {
1074
- "metric": "f1_macro",
1075
- "aggregation": "f1_macro",
1076
- "higher_is_better": true
1077
  },
1078
- {
1079
- "metric": "acc",
1080
- "aggregation": "acc",
1081
- "higher_is_better": true
1082
- }
1083
- ],
1084
- "output_type": "generate_until",
1085
- "generation_kwargs": {
1086
- "max_gen_toks": 32,
1087
- "do_sample": false,
1088
- "temperature": 0.0,
1089
- "top_k": null,
1090
- "top_p": null,
1091
- "until": [
1092
- "\n\n"
1093
- ]
1094
- },
1095
- "repeats": 1,
1096
- "filter_list": [
1097
- {
1098
- "name": "all",
1099
- "filter": [
1100
- {
1101
- "function": "find_similar_label",
1102
- "labels": [
1103
- "Positivo",
1104
- "Neutro",
1105
- "Negativo"
1106
- ]
1107
  },
1108
- {
1109
- "function": "take_first"
1110
  }
1111
- ]
1112
  }
1113
- ],
1114
- "should_decontaminate": false,
1115
- "metadata": {
1116
- "version": 1.0
1117
- }
1118
- }
1119
- },
1120
- "versions": {
1121
- "assin2_rte": 1.1,
1122
- "assin2_sts": 1.1,
1123
- "bluex": 1.1,
1124
- "enem_challenge": 1.1,
1125
- "faquad_nli": 1.1,
1126
- "hatebr_offensive": 1.0,
1127
- "oab_exams": 1.5,
1128
- "portuguese_hate_speech": 1.0,
1129
- "tweetsentbr": 1.0
1130
- },
1131
- "n-shot": {
1132
- "assin2_rte": 15,
1133
- "assin2_sts": 15,
1134
- "bluex": 3,
1135
- "enem_challenge": 3,
1136
- "faquad_nli": 15,
1137
- "hatebr_offensive": 25,
1138
- "oab_exams": 3,
1139
- "portuguese_hate_speech": 25,
1140
- "tweetsentbr": 25
1141
- },
1142
- "model_meta": {
1143
- "truncated": 0,
1144
- "non_truncated": 14150,
1145
- "padded": 0,
1146
- "non_padded": 14150,
1147
- "fewshots_truncated": 0,
1148
- "has_chat_template": false,
1149
- "chat_type": null,
1150
- "n_gpus": 1,
1151
- "accelerate_num_process": null,
1152
- "model_sha": "e852bc2e78a3fe509ec28c6d76512df3012acba7",
1153
- "model_dtype": "torch.float16",
1154
- "model_memory_footprint": 15020343296,
1155
- "model_num_parameters": 7241732096,
1156
- "model_is_loaded_in_4bit": null,
1157
- "model_is_loaded_in_8bit": null,
1158
- "model_is_quantized": null,
1159
- "model_device": "cuda:0",
1160
- "batch_size": 16,
1161
- "max_length": 4096,
1162
- "max_ctx_length": 4064,
1163
- "max_gen_toks": 32
1164
- },
1165
- "task_model_meta": {
1166
- "assin2_rte": {
1167
- "sample_size": 2448,
1168
- "truncated": 0,
1169
- "non_truncated": 2448,
1170
- "padded": 0,
1171
- "non_padded": 2448,
1172
- "fewshots_truncated": 0,
1173
- "mean_seq_length": 1369.7455065359477,
1174
- "min_seq_length": 1346,
1175
- "max_seq_length": 1436,
1176
- "max_ctx_length": 4064,
1177
- "max_gen_toks": 32,
1178
- "mean_original_fewshots_size": 15.0,
1179
- "mean_effective_fewshot_size": 15.0
1180
- },
1181
- "assin2_sts": {
1182
- "sample_size": 2448,
1183
- "truncated": 0,
1184
- "non_truncated": 2448,
1185
- "padded": 0,
1186
- "non_padded": 2448,
1187
- "fewshots_truncated": 0,
1188
- "mean_seq_length": 1593.7455065359477,
1189
- "min_seq_length": 1570,
1190
- "max_seq_length": 1660,
1191
- "max_ctx_length": 4064,
1192
- "max_gen_toks": 32,
1193
- "mean_original_fewshots_size": 15.0,
1194
- "mean_effective_fewshot_size": 15.0
1195
- },
1196
- "bluex": {
1197
- "sample_size": 719,
1198
- "truncated": 0,
1199
- "non_truncated": 719,
1200
- "padded": 0,
1201
- "non_padded": 719,
1202
- "fewshots_truncated": 0,
1203
- "mean_seq_length": 1719.9262865090404,
1204
- "min_seq_length": 1343,
1205
- "max_seq_length": 2520,
1206
- "max_ctx_length": 4064,
1207
- "max_gen_toks": 32,
1208
- "mean_original_fewshots_size": 3.0,
1209
- "mean_effective_fewshot_size": 3.0
1210
  },
1211
- "enem_challenge": {
1212
- "sample_size": 1429,
1213
- "truncated": 0,
1214
- "non_truncated": 1429,
1215
- "padded": 0,
1216
- "non_padded": 1429,
1217
- "fewshots_truncated": 0,
1218
- "mean_seq_length": 1620.039188243527,
1219
- "min_seq_length": 1354,
1220
- "max_seq_length": 2618,
1221
- "max_ctx_length": 4064,
1222
- "max_gen_toks": 32,
1223
- "mean_original_fewshots_size": 3.0,
1224
- "mean_effective_fewshot_size": 3.0
1225
  },
1226
- "faquad_nli": {
1227
- "sample_size": 650,
1228
- "truncated": 0,
1229
- "non_truncated": 650,
1230
- "padded": 0,
1231
- "non_padded": 650,
1232
- "fewshots_truncated": 0,
1233
- "mean_seq_length": 1594.9876923076922,
1234
- "min_seq_length": 1539,
1235
- "max_seq_length": 1715,
1236
- "max_ctx_length": 4064,
1237
- "max_gen_toks": 32,
1238
- "mean_original_fewshots_size": 15.0,
1239
- "mean_effective_fewshot_size": 15.0
1240
  },
1241
- "hatebr_offensive": {
1242
- "sample_size": 1400,
1243
- "truncated": 0,
1244
- "non_truncated": 1400,
1245
- "padded": 0,
1246
- "non_padded": 1400,
1247
- "fewshots_truncated": 0,
1248
- "mean_seq_length": 1305.3878571428572,
1249
- "min_seq_length": 1282,
1250
- "max_seq_length": 1556,
1251
- "max_ctx_length": 4064,
1252
- "max_gen_toks": 32,
1253
- "mean_original_fewshots_size": 25.0,
1254
- "mean_effective_fewshot_size": 25.0
1255
  },
1256
- "oab_exams": {
1257
- "sample_size": 2195,
1258
- "truncated": 0,
1259
- "non_truncated": 2195,
1260
- "padded": 0,
1261
- "non_padded": 2195,
1262
- "fewshots_truncated": 0,
1263
- "mean_seq_length": 1365.764464692483,
1264
- "min_seq_length": 1099,
1265
- "max_seq_length": 1868,
1266
- "max_ctx_length": 4064,
1267
- "max_gen_toks": 32,
1268
- "mean_original_fewshots_size": 3.0,
1269
- "mean_effective_fewshot_size": 3.0
1270
  },
1271
- "portuguese_hate_speech": {
1272
- "sample_size": 851,
1273
- "truncated": 0,
1274
- "non_truncated": 851,
1275
- "padded": 0,
1276
- "non_padded": 851,
1277
- "fewshots_truncated": 0,
1278
- "mean_seq_length": 1806.3360752056403,
1279
- "min_seq_length": 1771,
1280
- "max_seq_length": 1845,
1281
- "max_ctx_length": 4064,
1282
- "max_gen_toks": 32,
1283
- "mean_original_fewshots_size": 25.0,
1284
- "mean_effective_fewshot_size": 25.0
1285
  },
1286
- "tweetsentbr": {
1287
- "sample_size": 2010,
1288
- "truncated": 0,
1289
- "non_truncated": 2010,
1290
- "padded": 0,
1291
- "non_padded": 2010,
1292
- "fewshots_truncated": 0,
1293
- "mean_seq_length": 1552.2492537313433,
1294
- "min_seq_length": 1531,
1295
- "max_seq_length": 1647,
1296
- "max_ctx_length": 4064,
1297
- "max_gen_toks": 32,
1298
- "mean_original_fewshots_size": 25.0,
1299
- "mean_effective_fewshot_size": 25.0
1300
- }
1301
- },
1302
- "config": {
1303
- "model": "huggingface",
1304
- "model_args": "pretrained=Intel/neural-chat-7b-v3-1,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096",
1305
- "batch_size": "auto",
1306
- "batch_sizes": [],
1307
- "device": null,
1308
- "use_cache": null,
1309
- "limit": [
1310
- null,
1311
- null,
1312
- null,
1313
- null,
1314
- null,
1315
- null,
1316
- null,
1317
- null,
1318
- null
1319
- ],
1320
- "bootstrap_iters": 0,
1321
- "gen_kwargs": null
1322
- },
1323
- "git_hash": "804df15"
1324
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.9268770228292367,
5
+ "acc,all": 0.9268790849673203,
6
+ "alias": "assin2_rte"
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.7658477385894799,
10
+ "mse,all": 0.5124264705882353,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.47983310152990266,
15
+ "acc,exam_id__USP_2019": 0.4,
16
+ "acc,exam_id__USP_2022": 0.46938775510204084,
17
+ "acc,exam_id__USP_2023": 0.5909090909090909,
18
+ "acc,exam_id__UNICAMP_2018": 0.4074074074074074,
19
+ "acc,exam_id__UNICAMP_2019": 0.54,
20
+ "acc,exam_id__USP_2020": 0.48214285714285715,
21
+ "acc,exam_id__UNICAMP_2020": 0.43636363636363634,
22
+ "acc,exam_id__UNICAMP_2023": 0.5348837209302325,
23
+ "acc,exam_id__USP_2021": 0.4423076923076923,
24
+ "acc,exam_id__UNICAMP_2022": 0.5897435897435898,
25
+ "acc,exam_id__UNICAMP_2024": 0.5111111111111111,
26
+ "acc,exam_id__USP_2018": 0.37037037037037035,
27
+ "acc,exam_id__UNICAMP_2021_2": 0.45098039215686275,
28
+ "acc,exam_id__UNICAMP_2021_1": 0.45652173913043476,
29
+ "acc,exam_id__USP_2024": 0.5853658536585366,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.6263121063680895,
35
+ "acc,exam_id__2013": 0.6388888888888888,
36
+ "acc,exam_id__2016_2": 0.6666666666666666,
37
+ "acc,exam_id__2016": 0.5619834710743802,
38
+ "acc,exam_id__2011": 0.6837606837606838,
39
+ "acc,exam_id__2017": 0.5948275862068966,
40
+ "acc,exam_id__2023": 0.6814814814814815,
41
+ "acc,exam_id__2014": 0.6513761467889908,
42
+ "acc,exam_id__2012": 0.6379310344827587,
43
+ "acc,exam_id__2009": 0.6,
44
+ "acc,exam_id__2015": 0.5966386554621849,
45
+ "acc,exam_id__2022": 0.6165413533834586,
46
+ "acc,exam_id__2010": 0.5811965811965812
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.7840135895978708,
50
+ "acc,all": 0.8353846153846154,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.8905574366528357,
56
+ "acc,all": 0.8907142857142857
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.39726651480637815,
60
+ "acc,exam_id__2016-20a": 0.3875,
61
+ "acc,exam_id__2012-06": 0.4125,
62
+ "acc,exam_id__2015-18": 0.4,
63
+ "acc,exam_id__2014-14": 0.475,
64
+ "acc,exam_id__2012-07": 0.3875,
65
+ "acc,exam_id__2015-16": 0.3875,
66
+ "acc,exam_id__2011-05": 0.4375,
67
+ "acc,exam_id__2012-06a": 0.425,
68
+ "acc,exam_id__2017-23": 0.35,
69
+ "acc,exam_id__2016-19": 0.4230769230769231,
70
+ "acc,exam_id__2017-24": 0.35,
71
+ "acc,exam_id__2016-20": 0.375,
72
+ "acc,exam_id__2017-22": 0.475,
73
+ "acc,exam_id__2013-12": 0.45,
74
+ "acc,exam_id__2010-02": 0.36,
75
+ "acc,exam_id__2011-03": 0.3333333333333333,
76
+ "acc,exam_id__2012-08": 0.425,
77
+ "acc,exam_id__2013-10": 0.4125,
78
+ "acc,exam_id__2016-21": 0.4125,
79
+ "acc,exam_id__2014-15": 0.44871794871794873,
80
+ "acc,exam_id__2018-25": 0.425,
81
+ "acc,exam_id__2014-13": 0.275,
82
+ "acc,exam_id__2010-01": 0.3764705882352941,
83
+ "acc,exam_id__2015-17": 0.5,
84
+ "acc,exam_id__2013-11": 0.4,
85
+ "acc,exam_id__2011-04": 0.3125,
86
+ "acc,exam_id__2012-09": 0.33766233766233766,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.6685671281654837,
92
+ "acc,all": 0.6874265569917744
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.6861311602942273,
96
+ "acc,all": 0.7114427860696517,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7fb393d55120>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7fb393d54ae0>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fb393d54d60>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7fb393d55300>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fb393d55580>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7fb393d544a0>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7fb393d54720>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia-temp/tweetsentbr",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "id_sampler",
990
+ "sampler_config": {
991
+ "id_list": [
992
+ "862006098672459776",
993
+ "861612241703063552",
994
+ "861833257087848448",
995
+ "861283345476571138",
996
+ "861283000335695873",
997
+ "862139461274152962",
998
+ "862139468702265344",
999
+ "862006107702734848",
1000
+ "862004354458537984",
1001
+ "861833322925883392",
1002
+ "861603063190171648",
1003
+ "862139462716989440",
1004
+ "862005877355810818",
1005
+ "861751885862244353",
1006
+ "862045180261695489",
1007
+ "862004252499226630",
1008
+ "862023970828292097",
1009
+ "862041752127107074",
1010
+ "862034961863503872",
1011
+ "861293756548608001",
1012
+ "861993527575695360",
1013
+ "862003099355021315",
1014
+ "862002404086206467",
1015
+ "861282989602463744",
1016
+ "862139454399668229",
1017
+ "862139463769743361",
1018
+ "862054906689138688",
1019
+ "862139446535360513",
1020
+ "861997363744911361",
1021
+ "862057988898648065",
1022
+ "861329080083521536",
1023
+ "861286289034838016",
1024
+ "861833050526806017",
1025
+ "861300658565255169",
1026
+ "861989003821813760",
1027
+ "861682750398631938",
1028
+ "861283275716907008",
1029
+ "861283402523267072",
1030
+ "861873108147466240",
1031
+ "862139462138171392",
1032
+ "861284090271715333",
1033
+ "862139446149427201",
1034
+ "861629109331525633",
1035
+ "861721698609098753",
1036
+ "862139453124612096",
1037
+ "861283339482914816",
1038
+ "861282466291748867",
1039
+ "862055346759749632",
1040
+ "862003019860389891",
1041
+ "862140698346344449",
1042
+ "862084376280092672",
1043
+ "862003058708017152",
1044
+ "862000677345787904",
1045
+ "862029129310502913",
1046
+ "862005822376882178",
1047
+ "861969836297134085",
1048
+ "861302955361927168",
1049
+ "862064949451005953",
1050
+ "861282589541355520",
1051
+ "862005476858486784",
1052
+ "862004684411850757",
1053
+ "862139471101349890",
1054
+ "862139467146170368",
1055
+ "862139475098558465",
1056
+ "862140706550403072",
1057
+ "861282777001537536",
1058
+ "862003184147079169",
1059
+ "861283410656059394",
1060
+ "861283417857691649",
1061
+ "861888778922856448",
1062
+ "861655860812099585",
1063
+ "861834248063504384",
1064
+ "862005210935382017",
1065
+ "861282716930760704",
1066
+ "861287082433622022"
1067
+ ],
1068
+ "id_column": "id"
1069
+ }
1070
+ },
1071
+ "num_fewshot": 25,
1072
+ "metric_list": [
1073
+ {
1074
+ "metric": "f1_macro",
1075
+ "aggregation": "f1_macro",
1076
+ "higher_is_better": true
1077
+ },
1078
+ {
1079
+ "metric": "acc",
1080
+ "aggregation": "acc",
1081
+ "higher_is_better": true
1082
+ }
1083
+ ],
1084
+ "output_type": "generate_until",
1085
+ "generation_kwargs": {
1086
+ "max_gen_toks": 32,
1087
+ "do_sample": false,
1088
+ "temperature": 0.0,
1089
+ "top_k": null,
1090
+ "top_p": null,
1091
+ "until": [
1092
+ "\n\n"
1093
+ ]
1094
  },
1095
+ "repeats": 1,
1096
+ "filter_list": [
1097
+ {
1098
+ "name": "all",
1099
+ "filter": [
1100
+ {
1101
+ "function": "find_similar_label",
1102
+ "labels": [
1103
+ "Positivo",
1104
+ "Neutro",
1105
+ "Negativo"
1106
+ ]
1107
+ },
1108
+ {
1109
+ "function": "take_first"
1110
+ }
1111
+ ]
1112
+ }
1113
+ ],
1114
+ "should_decontaminate": false,
1115
+ "metadata": {
1116
+ "version": 1.0
1117
  }
 
1118
  }
1119
  },
1120
+ "versions": {
1121
+ "assin2_rte": 1.1,
1122
+ "assin2_sts": 1.1,
1123
+ "bluex": 1.1,
1124
+ "enem_challenge": 1.1,
1125
+ "faquad_nli": 1.1,
1126
+ "hatebr_offensive": 1.0,
1127
+ "oab_exams": 1.5,
1128
+ "portuguese_hate_speech": 1.0,
1129
+ "tweetsentbr": 1.0
1130
  },
1131
+ "n-shot": {
1132
+ "assin2_rte": 15,
1133
+ "assin2_sts": 15,
1134
+ "bluex": 3,
1135
+ "enem_challenge": 3,
1136
+ "faquad_nli": 15,
1137
+ "hatebr_offensive": 25,
1138
+ "oab_exams": 3,
1139
+ "portuguese_hate_speech": 25,
1140
+ "tweetsentbr": 25
1141
  },
1142
+ "model_meta": {
1143
+ "truncated": 0,
1144
+ "non_truncated": 14150,
1145
+ "padded": 0,
1146
+ "non_padded": 14150,
1147
+ "fewshots_truncated": 0,
1148
+ "has_chat_template": false,
1149
+ "chat_type": null,
1150
+ "n_gpus": 1,
1151
+ "accelerate_num_process": null,
1152
+ "model_sha": "e852bc2e78a3fe509ec28c6d76512df3012acba7",
1153
+ "model_dtype": "torch.float16",
1154
+ "model_memory_footprint": 15020343296,
1155
+ "model_num_parameters": 7241732096,
1156
+ "model_is_loaded_in_4bit": null,
1157
+ "model_is_loaded_in_8bit": null,
1158
+ "model_is_quantized": null,
1159
+ "model_device": "cuda:0",
1160
+ "batch_size": 16,
1161
+ "max_length": 4096,
1162
+ "max_ctx_length": 4064,
1163
+ "max_gen_toks": 32
1164
  },
1165
+ "task_model_meta": {
1166
+ "assin2_rte": {
1167
+ "sample_size": 2448,
1168
+ "truncated": 0,
1169
+ "non_truncated": 2448,
1170
+ "padded": 0,
1171
+ "non_padded": 2448,
1172
+ "fewshots_truncated": 0,
1173
+ "mean_seq_length": 1369.7455065359477,
1174
+ "min_seq_length": 1346,
1175
+ "max_seq_length": 1436,
1176
+ "max_ctx_length": 4064,
1177
+ "max_gen_toks": 32,
1178
+ "mean_original_fewshots_size": 15.0,
1179
+ "mean_effective_fewshot_size": 15.0
1180
+ },
1181
+ "assin2_sts": {
1182
+ "sample_size": 2448,
1183
+ "truncated": 0,
1184
+ "non_truncated": 2448,
1185
+ "padded": 0,
1186
+ "non_padded": 2448,
1187
+ "fewshots_truncated": 0,
1188
+ "mean_seq_length": 1593.7455065359477,
1189
+ "min_seq_length": 1570,
1190
+ "max_seq_length": 1660,
1191
+ "max_ctx_length": 4064,
1192
+ "max_gen_toks": 32,
1193
+ "mean_original_fewshots_size": 15.0,
1194
+ "mean_effective_fewshot_size": 15.0
1195
+ },
1196
+ "bluex": {
1197
+ "sample_size": 719,
1198
+ "truncated": 0,
1199
+ "non_truncated": 719,
1200
+ "padded": 0,
1201
+ "non_padded": 719,
1202
+ "fewshots_truncated": 0,
1203
+ "mean_seq_length": 1719.9262865090404,
1204
+ "min_seq_length": 1343,
1205
+ "max_seq_length": 2520,
1206
+ "max_ctx_length": 4064,
1207
+ "max_gen_toks": 32,
1208
+ "mean_original_fewshots_size": 3.0,
1209
+ "mean_effective_fewshot_size": 3.0
1210
+ },
1211
+ "enem_challenge": {
1212
+ "sample_size": 1429,
1213
+ "truncated": 0,
1214
+ "non_truncated": 1429,
1215
+ "padded": 0,
1216
+ "non_padded": 1429,
1217
+ "fewshots_truncated": 0,
1218
+ "mean_seq_length": 1620.039188243527,
1219
+ "min_seq_length": 1354,
1220
+ "max_seq_length": 2618,
1221
+ "max_ctx_length": 4064,
1222
+ "max_gen_toks": 32,
1223
+ "mean_original_fewshots_size": 3.0,
1224
+ "mean_effective_fewshot_size": 3.0
1225
+ },
1226
+ "faquad_nli": {
1227
+ "sample_size": 650,
1228
+ "truncated": 0,
1229
+ "non_truncated": 650,
1230
+ "padded": 0,
1231
+ "non_padded": 650,
1232
+ "fewshots_truncated": 0,
1233
+ "mean_seq_length": 1594.9876923076922,
1234
+ "min_seq_length": 1539,
1235
+ "max_seq_length": 1715,
1236
+ "max_ctx_length": 4064,
1237
+ "max_gen_toks": 32,
1238
+ "mean_original_fewshots_size": 15.0,
1239
+ "mean_effective_fewshot_size": 15.0
1240
+ },
1241
+ "hatebr_offensive": {
1242
+ "sample_size": 1400,
1243
+ "truncated": 0,
1244
+ "non_truncated": 1400,
1245
+ "padded": 0,
1246
+ "non_padded": 1400,
1247
+ "fewshots_truncated": 0,
1248
+ "mean_seq_length": 1305.3878571428572,
1249
+ "min_seq_length": 1282,
1250
+ "max_seq_length": 1556,
1251
+ "max_ctx_length": 4064,
1252
+ "max_gen_toks": 32,
1253
+ "mean_original_fewshots_size": 25.0,
1254
+ "mean_effective_fewshot_size": 25.0
1255
+ },
1256
+ "oab_exams": {
1257
+ "sample_size": 2195,
1258
+ "truncated": 0,
1259
+ "non_truncated": 2195,
1260
+ "padded": 0,
1261
+ "non_padded": 2195,
1262
+ "fewshots_truncated": 0,
1263
+ "mean_seq_length": 1365.764464692483,
1264
+ "min_seq_length": 1099,
1265
+ "max_seq_length": 1868,
1266
+ "max_ctx_length": 4064,
1267
+ "max_gen_toks": 32,
1268
+ "mean_original_fewshots_size": 3.0,
1269
+ "mean_effective_fewshot_size": 3.0
1270
+ },
1271
+ "portuguese_hate_speech": {
1272
+ "sample_size": 851,
1273
+ "truncated": 0,
1274
+ "non_truncated": 851,
1275
+ "padded": 0,
1276
+ "non_padded": 851,
1277
+ "fewshots_truncated": 0,
1278
+ "mean_seq_length": 1806.3360752056403,
1279
+ "min_seq_length": 1771,
1280
+ "max_seq_length": 1845,
1281
+ "max_ctx_length": 4064,
1282
+ "max_gen_toks": 32,
1283
+ "mean_original_fewshots_size": 25.0,
1284
+ "mean_effective_fewshot_size": 25.0
1285
+ },
1286
+ "tweetsentbr": {
1287
+ "sample_size": 2010,
1288
+ "truncated": 0,
1289
+ "non_truncated": 2010,
1290
+ "padded": 0,
1291
+ "non_padded": 2010,
1292
+ "fewshots_truncated": 0,
1293
+ "mean_seq_length": 1552.2492537313433,
1294
+ "min_seq_length": 1531,
1295
+ "max_seq_length": 1647,
1296
+ "max_ctx_length": 4064,
1297
+ "max_gen_toks": 32,
1298
+ "mean_original_fewshots_size": 25.0,
1299
+ "mean_effective_fewshot_size": 25.0
1300
+ }
1301
  },
1302
+ "config": {
1303
+ "model": "huggingface",
1304
+ "model_args": "pretrained=Intel/neural-chat-7b-v3-1,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096",
1305
+ "batch_size": "auto",
1306
+ "batch_sizes": [],
1307
+ "device": null,
1308
+ "use_cache": null,
1309
+ "limit": [
1310
+ null,
1311
+ null,
1312
+ null,
1313
+ null,
1314
+ null,
1315
+ null,
1316
+ null,
1317
+ null,
1318
+ null
1319
+ ],
1320
+ "bootstrap_iters": 0,
1321
+ "gen_kwargs": null
1322
  },
1323
+ "git_hash": "804df15"
1324
  }
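A note on the "f1_macro" metric that the task configs above (assin2_rte, faquad_nli, hatebr_offensive, portuguese_hate_speech, tweetsentbr) declare in their metric_list entries: the harness's own scoring code is not part of this diff, so the snippet below is only a minimal, non-authoritative sketch of what a macro-averaged F1 over the filtered label strings computes, written with scikit-learn; the gold/predicted lists are made-up placeholders, not data from these results.

# Minimal sketch of the "f1_macro" aggregation named in the metric_list
# entries above. This is NOT the evaluation harness's implementation; it
# only illustrates macro-averaged F1 on string labels with scikit-learn.
from sklearn.metrics import f1_score

golds = ["Sim", "Não", "Não", "Sim", "Sim"]  # hypothetical references
preds = ["Sim", "Não", "Sim", "Sim", "Não"]  # hypothetical filtered outputs

# Macro F1 averages the per-class F1 scores with equal weight per class;
# the labels= argument fixes which classes enter that average.
score = f1_score(golds, preds, average="macro", labels=["Sim", "Não"])
print(f"f1_macro = {score:.4f}")

Which classes enter the macro average (and how generations that match no label are counted after the find_similar_label / take_first filters) directly affects the reported score; the recomputed tweetsentbr f1_macro in the next file's hunks shows how large that effect can be.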
Intel/neural-chat-7b-v3-1/results_2024-02-25T06-21-33.008420.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.6726525565288831,
38
- "all_grouped_npm": 0.5225859783991329,
39
  "all_grouped": {
40
  "enem_challenge": 0.6263121063680895,
41
  "bluex": 0.47983310152990266,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.7840135895978708,
46
  "hatebr_offensive": 0.8905574366528357,
47
  "portuguese_hate_speech": 0.6685671281654837,
48
- "tweetsentbr": 0.5145983702206705
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6263121063680895,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7840135895978708,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8905574366528357,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6685671281654837,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.5145983702206705
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6263121063680895,
@@ -150,9 +150,9 @@
150
  "main_score": 0.6685671281654837
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.5145983702206705,
154
  "acc,all": 0.7114427860696517,
155
- "main_score": 0.5145983702206705
156
  }
157
  },
158
  "config_tasks": {
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.691711755425945,
38
+ "all_grouped_npm": 0.5509478815197606,
39
  "all_grouped": {
40
  "enem_challenge": 0.6263121063680895,
41
  "bluex": 0.47983310152990266,
 
45
  "faquad_nli": 0.7840135895978708,
46
  "hatebr_offensive": 0.8905574366528357,
47
  "portuguese_hate_speech": 0.6685671281654837,
48
+ "tweetsentbr": 0.6861311602942273
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6263121063680895,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7840135895978708,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8905574366528357,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6685671281654837,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.6861311602942273
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6263121063680895,
 
150
  "main_score": 0.6685671281654837
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.6861311602942273,
154
  "acc,all": 0.7114427860696517,
155
+ "main_score": 0.6861311602942273
156
  }
157
  },
158
  "config_tasks": {
Intel/neural-chat-7b-v3-3/raw_2024-02-21T22-54-50.520595/results.json CHANGED
@@ -1,1324 +1,1324 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.9140545431322211,
5
- "acc,all": 0.9142156862745098,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.7587721518241414,
10
- "mse,all": 0.5857066993464052,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.5034770514603616,
15
- "acc,exam_id__USP_2021": 0.4423076923076923,
16
- "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764,
17
- "acc,exam_id__UNICAMP_2023": 0.6046511627906976,
18
- "acc,exam_id__UNICAMP_2021_1": 0.43478260869565216,
19
- "acc,exam_id__USP_2024": 0.6585365853658537,
20
- "acc,exam_id__UNICAMP_2018": 0.46296296296296297,
21
- "acc,exam_id__USP_2022": 0.42857142857142855,
22
- "acc,exam_id__UNICAMP_2020": 0.45454545454545453,
23
- "acc,exam_id__USP_2018": 0.3888888888888889,
24
- "acc,exam_id__USP_2020": 0.5535714285714286,
25
- "acc,exam_id__UNICAMP_2022": 0.5641025641025641,
26
- "acc,exam_id__USP_2019": 0.475,
27
- "acc,exam_id__USP_2023": 0.6590909090909091,
28
- "acc,exam_id__UNICAMP_2019": 0.58,
29
- "acc,exam_id__UNICAMP_2024": 0.4444444444444444,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.6263121063680895,
35
- "acc,exam_id__2016_2": 0.6504065040650406,
36
- "acc,exam_id__2009": 0.6173913043478261,
37
- "acc,exam_id__2011": 0.7008547008547008,
38
- "acc,exam_id__2012": 0.6120689655172413,
39
- "acc,exam_id__2013": 0.6111111111111112,
40
- "acc,exam_id__2016": 0.5867768595041323,
41
- "acc,exam_id__2022": 0.6390977443609023,
42
- "acc,exam_id__2023": 0.6592592592592592,
43
- "acc,exam_id__2010": 0.5982905982905983,
44
- "acc,exam_id__2014": 0.6330275229357798,
45
- "acc,exam_id__2015": 0.6050420168067226,
46
- "acc,exam_id__2017": 0.5948275862068966
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.7147222222222223,
50
- "acc,all": 0.7569230769230769,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.8653967318817455,
56
- "acc,all": 0.8657142857142858
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.39635535307517084,
60
- "acc,exam_id__2010-02": 0.43,
61
- "acc,exam_id__2016-19": 0.46153846153846156,
62
- "acc,exam_id__2015-17": 0.47435897435897434,
63
- "acc,exam_id__2016-21": 0.3625,
64
- "acc,exam_id__2017-24": 0.3625,
65
- "acc,exam_id__2012-09": 0.3116883116883117,
66
- "acc,exam_id__2011-04": 0.3625,
67
- "acc,exam_id__2017-23": 0.3875,
68
- "acc,exam_id__2011-03": 0.36363636363636365,
69
- "acc,exam_id__2012-07": 0.35,
70
- "acc,exam_id__2012-06": 0.4625,
71
- "acc,exam_id__2014-13": 0.3,
72
- "acc,exam_id__2016-20a": 0.4375,
73
- "acc,exam_id__2011-05": 0.425,
74
- "acc,exam_id__2015-18": 0.3875,
75
- "acc,exam_id__2014-15": 0.41025641025641024,
76
- "acc,exam_id__2018-25": 0.4625,
77
- "acc,exam_id__2017-22": 0.475,
78
- "acc,exam_id__2013-11": 0.4375,
79
- "acc,exam_id__2014-14": 0.425,
80
- "acc,exam_id__2013-10": 0.425,
81
- "acc,exam_id__2010-01": 0.3176470588235294,
82
- "acc,exam_id__2013-12": 0.4125,
83
- "acc,exam_id__2015-16": 0.2875,
84
- "acc,exam_id__2012-06a": 0.4375,
85
- "acc,exam_id__2016-20": 0.4125,
86
- "acc,exam_id__2012-08": 0.325,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.6322323153577603,
92
- "acc,all": 0.6404230317273796
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.4689260995001763,
96
- "acc,all": 0.6925373134328359,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
 
208
  },
209
- {
210
- "function": "take_first"
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7fd79f4a5120>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7fd79f4a4ae0>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
414
  },
415
- {
416
- "function": "take_first"
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fd79f4a4d60>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7fd79f4a5300>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
503
  },
504
- {
505
- "function": "take_first"
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fd79f4a5580>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
624
  },
625
- {
626
- "function": "take_first"
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7fd79f4a44a0>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
839
  },
840
- {
841
- "function": "take_first"
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7fd79f4a4720>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
963
  },
964
- {
965
- "function": "take_first"
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia-temp/tweetsentbr",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "id_sampler",
990
- "sampler_config": {
991
- "id_list": [
992
- "862006098672459776",
993
- "861612241703063552",
994
- "861833257087848448",
995
- "861283345476571138",
996
- "861283000335695873",
997
- "862139461274152962",
998
- "862139468702265344",
999
- "862006107702734848",
1000
- "862004354458537984",
1001
- "861833322925883392",
1002
- "861603063190171648",
1003
- "862139462716989440",
1004
- "862005877355810818",
1005
- "861751885862244353",
1006
- "862045180261695489",
1007
- "862004252499226630",
1008
- "862023970828292097",
1009
- "862041752127107074",
1010
- "862034961863503872",
1011
- "861293756548608001",
1012
- "861993527575695360",
1013
- "862003099355021315",
1014
- "862002404086206467",
1015
- "861282989602463744",
1016
- "862139454399668229",
1017
- "862139463769743361",
1018
- "862054906689138688",
1019
- "862139446535360513",
1020
- "861997363744911361",
1021
- "862057988898648065",
1022
- "861329080083521536",
1023
- "861286289034838016",
1024
- "861833050526806017",
1025
- "861300658565255169",
1026
- "861989003821813760",
1027
- "861682750398631938",
1028
- "861283275716907008",
1029
- "861283402523267072",
1030
- "861873108147466240",
1031
- "862139462138171392",
1032
- "861284090271715333",
1033
- "862139446149427201",
1034
- "861629109331525633",
1035
- "861721698609098753",
1036
- "862139453124612096",
1037
- "861283339482914816",
1038
- "861282466291748867",
1039
- "862055346759749632",
1040
- "862003019860389891",
1041
- "862140698346344449",
1042
- "862084376280092672",
1043
- "862003058708017152",
1044
- "862000677345787904",
1045
- "862029129310502913",
1046
- "862005822376882178",
1047
- "861969836297134085",
1048
- "861302955361927168",
1049
- "862064949451005953",
1050
- "861282589541355520",
1051
- "862005476858486784",
1052
- "862004684411850757",
1053
- "862139471101349890",
1054
- "862139467146170368",
1055
- "862139475098558465",
1056
- "862140706550403072",
1057
- "861282777001537536",
1058
- "862003184147079169",
1059
- "861283410656059394",
1060
- "861283417857691649",
1061
- "861888778922856448",
1062
- "861655860812099585",
1063
- "861834248063504384",
1064
- "862005210935382017",
1065
- "861282716930760704",
1066
- "861287082433622022"
1067
- ],
1068
- "id_column": "id"
1069
- }
1070
- },
1071
- "num_fewshot": 25,
1072
- "metric_list": [
1073
- {
1074
- "metric": "f1_macro",
1075
- "aggregation": "f1_macro",
1076
- "higher_is_better": true
1077
  },
1078
- {
1079
- "metric": "acc",
1080
- "aggregation": "acc",
1081
- "higher_is_better": true
1082
- }
1083
- ],
1084
- "output_type": "generate_until",
1085
- "generation_kwargs": {
1086
- "max_gen_toks": 32,
1087
- "do_sample": false,
1088
- "temperature": 0.0,
1089
- "top_k": null,
1090
- "top_p": null,
1091
- "until": [
1092
- "\n\n"
1093
- ]
1094
- },
1095
- "repeats": 1,
1096
- "filter_list": [
1097
- {
1098
- "name": "all",
1099
- "filter": [
1100
- {
1101
- "function": "find_similar_label",
1102
- "labels": [
1103
- "Positivo",
1104
- "Neutro",
1105
- "Negativo"
1106
- ]
1107
  },
1108
- {
1109
- "function": "take_first"
1110
  }
1111
- ]
1112
  }
1113
- ],
1114
- "should_decontaminate": false,
1115
- "metadata": {
1116
- "version": 1.0
1117
- }
1118
- }
1119
- },
1120
- "versions": {
1121
- "assin2_rte": 1.1,
1122
- "assin2_sts": 1.1,
1123
- "bluex": 1.1,
1124
- "enem_challenge": 1.1,
1125
- "faquad_nli": 1.1,
1126
- "hatebr_offensive": 1.0,
1127
- "oab_exams": 1.5,
1128
- "portuguese_hate_speech": 1.0,
1129
- "tweetsentbr": 1.0
1130
- },
1131
- "n-shot": {
1132
- "assin2_rte": 15,
1133
- "assin2_sts": 15,
1134
- "bluex": 3,
1135
- "enem_challenge": 3,
1136
- "faquad_nli": 15,
1137
- "hatebr_offensive": 25,
1138
- "oab_exams": 3,
1139
- "portuguese_hate_speech": 25,
1140
- "tweetsentbr": 25
1141
- },
1142
- "model_meta": {
1143
- "truncated": 0,
1144
- "non_truncated": 14150,
1145
- "padded": 0,
1146
- "non_padded": 14150,
1147
- "fewshots_truncated": 0,
1148
- "has_chat_template": false,
1149
- "chat_type": null,
1150
- "n_gpus": 1,
1151
- "accelerate_num_process": null,
1152
- "model_sha": "cdce282d00962dcc4dc317fc5786b332d370a6d4",
1153
- "model_dtype": "torch.float16",
1154
- "model_memory_footprint": 15020343296,
1155
- "model_num_parameters": 7241732096,
1156
- "model_is_loaded_in_4bit": null,
1157
- "model_is_loaded_in_8bit": null,
1158
- "model_is_quantized": null,
1159
- "model_device": "cuda:0",
1160
- "batch_size": 16,
1161
- "max_length": 4096,
1162
- "max_ctx_length": 4064,
1163
- "max_gen_toks": 32
1164
- },
1165
- "task_model_meta": {
1166
- "assin2_rte": {
1167
- "sample_size": 2448,
1168
- "truncated": 0,
1169
- "non_truncated": 2448,
1170
- "padded": 0,
1171
- "non_padded": 2448,
1172
- "fewshots_truncated": 0,
1173
- "mean_seq_length": 1369.7455065359477,
1174
- "min_seq_length": 1346,
1175
- "max_seq_length": 1436,
1176
- "max_ctx_length": 4064,
1177
- "max_gen_toks": 32,
1178
- "mean_original_fewshots_size": 15.0,
1179
- "mean_effective_fewshot_size": 15.0
1180
- },
1181
- "assin2_sts": {
1182
- "sample_size": 2448,
1183
- "truncated": 0,
1184
- "non_truncated": 2448,
1185
- "padded": 0,
1186
- "non_padded": 2448,
1187
- "fewshots_truncated": 0,
1188
- "mean_seq_length": 1593.7455065359477,
1189
- "min_seq_length": 1570,
1190
- "max_seq_length": 1660,
1191
- "max_ctx_length": 4064,
1192
- "max_gen_toks": 32,
1193
- "mean_original_fewshots_size": 15.0,
1194
- "mean_effective_fewshot_size": 15.0
1195
- },
1196
- "bluex": {
1197
- "sample_size": 719,
1198
- "truncated": 0,
1199
- "non_truncated": 719,
1200
- "padded": 0,
1201
- "non_padded": 719,
1202
- "fewshots_truncated": 0,
1203
- "mean_seq_length": 1719.9262865090404,
1204
- "min_seq_length": 1343,
1205
- "max_seq_length": 2520,
1206
- "max_ctx_length": 4064,
1207
- "max_gen_toks": 32,
1208
- "mean_original_fewshots_size": 3.0,
1209
- "mean_effective_fewshot_size": 3.0
1210
  },
1211
- "enem_challenge": {
1212
- "sample_size": 1429,
1213
- "truncated": 0,
1214
- "non_truncated": 1429,
1215
- "padded": 0,
1216
- "non_padded": 1429,
1217
- "fewshots_truncated": 0,
1218
- "mean_seq_length": 1620.039188243527,
1219
- "min_seq_length": 1354,
1220
- "max_seq_length": 2618,
1221
- "max_ctx_length": 4064,
1222
- "max_gen_toks": 32,
1223
- "mean_original_fewshots_size": 3.0,
1224
- "mean_effective_fewshot_size": 3.0
1225
  },
1226
- "faquad_nli": {
1227
- "sample_size": 650,
1228
- "truncated": 0,
1229
- "non_truncated": 650,
1230
- "padded": 0,
1231
- "non_padded": 650,
1232
- "fewshots_truncated": 0,
1233
- "mean_seq_length": 1594.9876923076922,
1234
- "min_seq_length": 1539,
1235
- "max_seq_length": 1715,
1236
- "max_ctx_length": 4064,
1237
- "max_gen_toks": 32,
1238
- "mean_original_fewshots_size": 15.0,
1239
- "mean_effective_fewshot_size": 15.0
1240
  },
1241
- "hatebr_offensive": {
1242
- "sample_size": 1400,
1243
- "truncated": 0,
1244
- "non_truncated": 1400,
1245
- "padded": 0,
1246
- "non_padded": 1400,
1247
- "fewshots_truncated": 0,
1248
- "mean_seq_length": 1305.3878571428572,
1249
- "min_seq_length": 1282,
1250
- "max_seq_length": 1556,
1251
- "max_ctx_length": 4064,
1252
- "max_gen_toks": 32,
1253
- "mean_original_fewshots_size": 25.0,
1254
- "mean_effective_fewshot_size": 25.0
1255
  },
1256
- "oab_exams": {
1257
- "sample_size": 2195,
1258
- "truncated": 0,
1259
- "non_truncated": 2195,
1260
- "padded": 0,
1261
- "non_padded": 2195,
1262
- "fewshots_truncated": 0,
1263
- "mean_seq_length": 1365.764464692483,
1264
- "min_seq_length": 1099,
1265
- "max_seq_length": 1868,
1266
- "max_ctx_length": 4064,
1267
- "max_gen_toks": 32,
1268
- "mean_original_fewshots_size": 3.0,
1269
- "mean_effective_fewshot_size": 3.0
1270
  },
1271
- "portuguese_hate_speech": {
1272
- "sample_size": 851,
1273
- "truncated": 0,
1274
- "non_truncated": 851,
1275
- "padded": 0,
1276
- "non_padded": 851,
1277
- "fewshots_truncated": 0,
1278
- "mean_seq_length": 1806.3360752056403,
1279
- "min_seq_length": 1771,
1280
- "max_seq_length": 1845,
1281
- "max_ctx_length": 4064,
1282
- "max_gen_toks": 32,
1283
- "mean_original_fewshots_size": 25.0,
1284
- "mean_effective_fewshot_size": 25.0
1285
  },
1286
- "tweetsentbr": {
1287
- "sample_size": 2010,
1288
- "truncated": 0,
1289
- "non_truncated": 2010,
1290
- "padded": 0,
1291
- "non_padded": 2010,
1292
- "fewshots_truncated": 0,
1293
- "mean_seq_length": 1552.2492537313433,
1294
- "min_seq_length": 1531,
1295
- "max_seq_length": 1647,
1296
- "max_ctx_length": 4064,
1297
- "max_gen_toks": 32,
1298
- "mean_original_fewshots_size": 25.0,
1299
- "mean_effective_fewshot_size": 25.0
1300
- }
1301
- },
1302
- "config": {
1303
- "model": "huggingface",
1304
- "model_args": "pretrained=Intel/neural-chat-7b-v3-3,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096",
1305
- "batch_size": "auto",
1306
- "batch_sizes": [],
1307
- "device": null,
1308
- "use_cache": null,
1309
- "limit": [
1310
- null,
1311
- null,
1312
- null,
1313
- null,
1314
- null,
1315
- null,
1316
- null,
1317
- null,
1318
- null
1319
- ],
1320
- "bootstrap_iters": 0,
1321
- "gen_kwargs": null
1322
- },
1323
- "git_hash": "804df15"
1324
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.9140545431322211,
5
+ "acc,all": 0.9142156862745098,
6
+ "alias": "assin2_rte"
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.7587721518241414,
10
+ "mse,all": 0.5857066993464052,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.5034770514603616,
15
+ "acc,exam_id__USP_2021": 0.4423076923076923,
16
+ "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764,
17
+ "acc,exam_id__UNICAMP_2023": 0.6046511627906976,
18
+ "acc,exam_id__UNICAMP_2021_1": 0.43478260869565216,
19
+ "acc,exam_id__USP_2024": 0.6585365853658537,
20
+ "acc,exam_id__UNICAMP_2018": 0.46296296296296297,
21
+ "acc,exam_id__USP_2022": 0.42857142857142855,
22
+ "acc,exam_id__UNICAMP_2020": 0.45454545454545453,
23
+ "acc,exam_id__USP_2018": 0.3888888888888889,
24
+ "acc,exam_id__USP_2020": 0.5535714285714286,
25
+ "acc,exam_id__UNICAMP_2022": 0.5641025641025641,
26
+ "acc,exam_id__USP_2019": 0.475,
27
+ "acc,exam_id__USP_2023": 0.6590909090909091,
28
+ "acc,exam_id__UNICAMP_2019": 0.58,
29
+ "acc,exam_id__UNICAMP_2024": 0.4444444444444444,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.6263121063680895,
35
+ "acc,exam_id__2016_2": 0.6504065040650406,
36
+ "acc,exam_id__2009": 0.6173913043478261,
37
+ "acc,exam_id__2011": 0.7008547008547008,
38
+ "acc,exam_id__2012": 0.6120689655172413,
39
+ "acc,exam_id__2013": 0.6111111111111112,
40
+ "acc,exam_id__2016": 0.5867768595041323,
41
+ "acc,exam_id__2022": 0.6390977443609023,
42
+ "acc,exam_id__2023": 0.6592592592592592,
43
+ "acc,exam_id__2010": 0.5982905982905983,
44
+ "acc,exam_id__2014": 0.6330275229357798,
45
+ "acc,exam_id__2015": 0.6050420168067226,
46
+ "acc,exam_id__2017": 0.5948275862068966
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.7147222222222223,
50
+ "acc,all": 0.7569230769230769,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.8653967318817455,
56
+ "acc,all": 0.8657142857142858
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.39635535307517084,
60
+ "acc,exam_id__2010-02": 0.43,
61
+ "acc,exam_id__2016-19": 0.46153846153846156,
62
+ "acc,exam_id__2015-17": 0.47435897435897434,
63
+ "acc,exam_id__2016-21": 0.3625,
64
+ "acc,exam_id__2017-24": 0.3625,
65
+ "acc,exam_id__2012-09": 0.3116883116883117,
66
+ "acc,exam_id__2011-04": 0.3625,
67
+ "acc,exam_id__2017-23": 0.3875,
68
+ "acc,exam_id__2011-03": 0.36363636363636365,
69
+ "acc,exam_id__2012-07": 0.35,
70
+ "acc,exam_id__2012-06": 0.4625,
71
+ "acc,exam_id__2014-13": 0.3,
72
+ "acc,exam_id__2016-20a": 0.4375,
73
+ "acc,exam_id__2011-05": 0.425,
74
+ "acc,exam_id__2015-18": 0.3875,
75
+ "acc,exam_id__2014-15": 0.41025641025641024,
76
+ "acc,exam_id__2018-25": 0.4625,
77
+ "acc,exam_id__2017-22": 0.475,
78
+ "acc,exam_id__2013-11": 0.4375,
79
+ "acc,exam_id__2014-14": 0.425,
80
+ "acc,exam_id__2013-10": 0.425,
81
+ "acc,exam_id__2010-01": 0.3176470588235294,
82
+ "acc,exam_id__2013-12": 0.4125,
83
+ "acc,exam_id__2015-16": 0.2875,
84
+ "acc,exam_id__2012-06a": 0.4375,
85
+ "acc,exam_id__2016-20": 0.4125,
86
+ "acc,exam_id__2012-08": 0.325,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.6322323153577603,
92
+ "acc,all": 0.6404230317273796
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.6252347993335684,
96
+ "acc,all": 0.6925373134328359,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7fd79f4a5120>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7fd79f4a4ae0>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fd79f4a4d60>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7fd79f4a5300>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fd79f4a5580>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7fd79f4a44a0>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7fd79f4a4720>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia-temp/tweetsentbr",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "id_sampler",
990
+ "sampler_config": {
991
+ "id_list": [
992
+ "862006098672459776",
993
+ "861612241703063552",
994
+ "861833257087848448",
995
+ "861283345476571138",
996
+ "861283000335695873",
997
+ "862139461274152962",
998
+ "862139468702265344",
999
+ "862006107702734848",
1000
+ "862004354458537984",
1001
+ "861833322925883392",
1002
+ "861603063190171648",
1003
+ "862139462716989440",
1004
+ "862005877355810818",
1005
+ "861751885862244353",
1006
+ "862045180261695489",
1007
+ "862004252499226630",
1008
+ "862023970828292097",
1009
+ "862041752127107074",
1010
+ "862034961863503872",
1011
+ "861293756548608001",
1012
+ "861993527575695360",
1013
+ "862003099355021315",
1014
+ "862002404086206467",
1015
+ "861282989602463744",
1016
+ "862139454399668229",
1017
+ "862139463769743361",
1018
+ "862054906689138688",
1019
+ "862139446535360513",
1020
+ "861997363744911361",
1021
+ "862057988898648065",
1022
+ "861329080083521536",
1023
+ "861286289034838016",
1024
+ "861833050526806017",
1025
+ "861300658565255169",
1026
+ "861989003821813760",
1027
+ "861682750398631938",
1028
+ "861283275716907008",
1029
+ "861283402523267072",
1030
+ "861873108147466240",
1031
+ "862139462138171392",
1032
+ "861284090271715333",
1033
+ "862139446149427201",
1034
+ "861629109331525633",
1035
+ "861721698609098753",
1036
+ "862139453124612096",
1037
+ "861283339482914816",
1038
+ "861282466291748867",
1039
+ "862055346759749632",
1040
+ "862003019860389891",
1041
+ "862140698346344449",
1042
+ "862084376280092672",
1043
+ "862003058708017152",
1044
+ "862000677345787904",
1045
+ "862029129310502913",
1046
+ "862005822376882178",
1047
+ "861969836297134085",
1048
+ "861302955361927168",
1049
+ "862064949451005953",
1050
+ "861282589541355520",
1051
+ "862005476858486784",
1052
+ "862004684411850757",
1053
+ "862139471101349890",
1054
+ "862139467146170368",
1055
+ "862139475098558465",
1056
+ "862140706550403072",
1057
+ "861282777001537536",
1058
+ "862003184147079169",
1059
+ "861283410656059394",
1060
+ "861283417857691649",
1061
+ "861888778922856448",
1062
+ "861655860812099585",
1063
+ "861834248063504384",
1064
+ "862005210935382017",
1065
+ "861282716930760704",
1066
+ "861287082433622022"
1067
+ ],
1068
+ "id_column": "id"
1069
+ }
1070
+ },
1071
+ "num_fewshot": 25,
1072
+ "metric_list": [
1073
+ {
1074
+ "metric": "f1_macro",
1075
+ "aggregation": "f1_macro",
1076
+ "higher_is_better": true
1077
+ },
1078
+ {
1079
+ "metric": "acc",
1080
+ "aggregation": "acc",
1081
+ "higher_is_better": true
1082
+ }
1083
+ ],
1084
+ "output_type": "generate_until",
1085
+ "generation_kwargs": {
1086
+ "max_gen_toks": 32,
1087
+ "do_sample": false,
1088
+ "temperature": 0.0,
1089
+ "top_k": null,
1090
+ "top_p": null,
1091
+ "until": [
1092
+ "\n\n"
1093
+ ]
1094
  },
1095
+ "repeats": 1,
1096
+ "filter_list": [
1097
+ {
1098
+ "name": "all",
1099
+ "filter": [
1100
+ {
1101
+ "function": "find_similar_label",
1102
+ "labels": [
1103
+ "Positivo",
1104
+ "Neutro",
1105
+ "Negativo"
1106
+ ]
1107
+ },
1108
+ {
1109
+ "function": "take_first"
1110
+ }
1111
+ ]
1112
+ }
1113
+ ],
1114
+ "should_decontaminate": false,
1115
+ "metadata": {
1116
+ "version": 1.0
1117
  }
 
1118
  }
1119
  },
1120
+ "versions": {
1121
+ "assin2_rte": 1.1,
1122
+ "assin2_sts": 1.1,
1123
+ "bluex": 1.1,
1124
+ "enem_challenge": 1.1,
1125
+ "faquad_nli": 1.1,
1126
+ "hatebr_offensive": 1.0,
1127
+ "oab_exams": 1.5,
1128
+ "portuguese_hate_speech": 1.0,
1129
+ "tweetsentbr": 1.0
1130
  },
1131
+ "n-shot": {
1132
+ "assin2_rte": 15,
1133
+ "assin2_sts": 15,
1134
+ "bluex": 3,
1135
+ "enem_challenge": 3,
1136
+ "faquad_nli": 15,
1137
+ "hatebr_offensive": 25,
1138
+ "oab_exams": 3,
1139
+ "portuguese_hate_speech": 25,
1140
+ "tweetsentbr": 25
1141
  },
1142
+ "model_meta": {
1143
+ "truncated": 0,
1144
+ "non_truncated": 14150,
1145
+ "padded": 0,
1146
+ "non_padded": 14150,
1147
+ "fewshots_truncated": 0,
1148
+ "has_chat_template": false,
1149
+ "chat_type": null,
1150
+ "n_gpus": 1,
1151
+ "accelerate_num_process": null,
1152
+ "model_sha": "cdce282d00962dcc4dc317fc5786b332d370a6d4",
1153
+ "model_dtype": "torch.float16",
1154
+ "model_memory_footprint": 15020343296,
1155
+ "model_num_parameters": 7241732096,
1156
+ "model_is_loaded_in_4bit": null,
1157
+ "model_is_loaded_in_8bit": null,
1158
+ "model_is_quantized": null,
1159
+ "model_device": "cuda:0",
1160
+ "batch_size": 16,
1161
+ "max_length": 4096,
1162
+ "max_ctx_length": 4064,
1163
+ "max_gen_toks": 32
1164
  },
1165
+ "task_model_meta": {
1166
+ "assin2_rte": {
1167
+ "sample_size": 2448,
1168
+ "truncated": 0,
1169
+ "non_truncated": 2448,
1170
+ "padded": 0,
1171
+ "non_padded": 2448,
1172
+ "fewshots_truncated": 0,
1173
+ "mean_seq_length": 1369.7455065359477,
1174
+ "min_seq_length": 1346,
1175
+ "max_seq_length": 1436,
1176
+ "max_ctx_length": 4064,
1177
+ "max_gen_toks": 32,
1178
+ "mean_original_fewshots_size": 15.0,
1179
+ "mean_effective_fewshot_size": 15.0
1180
+ },
1181
+ "assin2_sts": {
1182
+ "sample_size": 2448,
1183
+ "truncated": 0,
1184
+ "non_truncated": 2448,
1185
+ "padded": 0,
1186
+ "non_padded": 2448,
1187
+ "fewshots_truncated": 0,
1188
+ "mean_seq_length": 1593.7455065359477,
1189
+ "min_seq_length": 1570,
1190
+ "max_seq_length": 1660,
1191
+ "max_ctx_length": 4064,
1192
+ "max_gen_toks": 32,
1193
+ "mean_original_fewshots_size": 15.0,
1194
+ "mean_effective_fewshot_size": 15.0
1195
+ },
1196
+ "bluex": {
1197
+ "sample_size": 719,
1198
+ "truncated": 0,
1199
+ "non_truncated": 719,
1200
+ "padded": 0,
1201
+ "non_padded": 719,
1202
+ "fewshots_truncated": 0,
1203
+ "mean_seq_length": 1719.9262865090404,
1204
+ "min_seq_length": 1343,
1205
+ "max_seq_length": 2520,
1206
+ "max_ctx_length": 4064,
1207
+ "max_gen_toks": 32,
1208
+ "mean_original_fewshots_size": 3.0,
1209
+ "mean_effective_fewshot_size": 3.0
1210
+ },
1211
+ "enem_challenge": {
1212
+ "sample_size": 1429,
1213
+ "truncated": 0,
1214
+ "non_truncated": 1429,
1215
+ "padded": 0,
1216
+ "non_padded": 1429,
1217
+ "fewshots_truncated": 0,
1218
+ "mean_seq_length": 1620.039188243527,
1219
+ "min_seq_length": 1354,
1220
+ "max_seq_length": 2618,
1221
+ "max_ctx_length": 4064,
1222
+ "max_gen_toks": 32,
1223
+ "mean_original_fewshots_size": 3.0,
1224
+ "mean_effective_fewshot_size": 3.0
1225
+ },
1226
+ "faquad_nli": {
1227
+ "sample_size": 650,
1228
+ "truncated": 0,
1229
+ "non_truncated": 650,
1230
+ "padded": 0,
1231
+ "non_padded": 650,
1232
+ "fewshots_truncated": 0,
1233
+ "mean_seq_length": 1594.9876923076922,
1234
+ "min_seq_length": 1539,
1235
+ "max_seq_length": 1715,
1236
+ "max_ctx_length": 4064,
1237
+ "max_gen_toks": 32,
1238
+ "mean_original_fewshots_size": 15.0,
1239
+ "mean_effective_fewshot_size": 15.0
1240
+ },
1241
+ "hatebr_offensive": {
1242
+ "sample_size": 1400,
1243
+ "truncated": 0,
1244
+ "non_truncated": 1400,
1245
+ "padded": 0,
1246
+ "non_padded": 1400,
1247
+ "fewshots_truncated": 0,
1248
+ "mean_seq_length": 1305.3878571428572,
1249
+ "min_seq_length": 1282,
1250
+ "max_seq_length": 1556,
1251
+ "max_ctx_length": 4064,
1252
+ "max_gen_toks": 32,
1253
+ "mean_original_fewshots_size": 25.0,
1254
+ "mean_effective_fewshot_size": 25.0
1255
+ },
1256
+ "oab_exams": {
1257
+ "sample_size": 2195,
1258
+ "truncated": 0,
1259
+ "non_truncated": 2195,
1260
+ "padded": 0,
1261
+ "non_padded": 2195,
1262
+ "fewshots_truncated": 0,
1263
+ "mean_seq_length": 1365.764464692483,
1264
+ "min_seq_length": 1099,
1265
+ "max_seq_length": 1868,
1266
+ "max_ctx_length": 4064,
1267
+ "max_gen_toks": 32,
1268
+ "mean_original_fewshots_size": 3.0,
1269
+ "mean_effective_fewshot_size": 3.0
1270
+ },
1271
+ "portuguese_hate_speech": {
1272
+ "sample_size": 851,
1273
+ "truncated": 0,
1274
+ "non_truncated": 851,
1275
+ "padded": 0,
1276
+ "non_padded": 851,
1277
+ "fewshots_truncated": 0,
1278
+ "mean_seq_length": 1806.3360752056403,
1279
+ "min_seq_length": 1771,
1280
+ "max_seq_length": 1845,
1281
+ "max_ctx_length": 4064,
1282
+ "max_gen_toks": 32,
1283
+ "mean_original_fewshots_size": 25.0,
1284
+ "mean_effective_fewshot_size": 25.0
1285
+ },
1286
+ "tweetsentbr": {
1287
+ "sample_size": 2010,
1288
+ "truncated": 0,
1289
+ "non_truncated": 2010,
1290
+ "padded": 0,
1291
+ "non_padded": 2010,
1292
+ "fewshots_truncated": 0,
1293
+ "mean_seq_length": 1552.2492537313433,
1294
+ "min_seq_length": 1531,
1295
+ "max_seq_length": 1647,
1296
+ "max_ctx_length": 4064,
1297
+ "max_gen_toks": 32,
1298
+ "mean_original_fewshots_size": 25.0,
1299
+ "mean_effective_fewshot_size": 25.0
1300
+ }
1301
  },
1302
+ "config": {
1303
+ "model": "huggingface",
1304
+ "model_args": "pretrained=Intel/neural-chat-7b-v3-3,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=4096",
1305
+ "batch_size": "auto",
1306
+ "batch_sizes": [],
1307
+ "device": null,
1308
+ "use_cache": null,
1309
+ "limit": [
1310
+ null,
1311
+ null,
1312
+ null,
1313
+ null,
1314
+ null,
1315
+ null,
1316
+ null,
1317
+ null,
1318
+ null
1319
+ ],
1320
+ "bootstrap_iters": 0,
1321
+ "gen_kwargs": null
1322
  },
1323
+ "git_hash": "804df15"
1324
  }
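Note on the tweetsentbr score in the file above: the task is scored with the f1_macro metric over the labels Positivo, Neutro and Negativo declared in its filter. The snippet below is a minimal, hypothetical sketch (Python/scikit-learn, not the evaluation harness's own code) of how such a macro F1 can be computed; the y_true/y_pred values are toy data for illustration only.

# Hypothetical sketch, not the harness's implementation.
from sklearn.metrics import f1_score

LABELS = ["Positivo", "Neutro", "Negativo"]  # label set from the tweetsentbr filter above

y_true = ["Positivo", "Negativo", "Neutro", "Positivo", "Negativo"]  # toy gold labels
y_pred = ["Positivo", "Negativo", "Positivo", "Positivo", "Neutro"]  # toy predictions

# Passing labels= fixes which classes enter the macro average; omitting it lets
# scikit-learn infer the class set from the data, which can change the score
# when a class never occurs among the predictions or references.
print(f1_score(y_true, y_pred, labels=LABELS, average="macro"))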
Intel/neural-chat-7b-v3-3/results_2024-02-21T22-54-50.520595.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.6533609527579876,
38
- "all_grouped_npm": 0.4871606934211402,
39
  "all_grouped": {
40
  "enem_challenge": 0.6263121063680895,
41
  "bluex": 0.5034770514603616,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.7147222222222223,
46
  "hatebr_offensive": 0.8653967318817455,
47
  "portuguese_hate_speech": 0.6322323153577603,
48
- "tweetsentbr": 0.4689260995001763
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6263121063680895,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7147222222222223,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8653967318817455,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6322323153577603,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.4689260995001763
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6263121063680895,
@@ -150,9 +150,9 @@
150
  "main_score": 0.6322323153577603
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.4689260995001763,
154
  "acc,all": 0.6925373134328359,
155
- "main_score": 0.4689260995001763
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.6707285860728089,
38
+ "all_grouped_npm": 0.5130053858539101,
39
  "all_grouped": {
40
  "enem_challenge": 0.6263121063680895,
41
  "bluex": 0.5034770514603616,
 
45
  "faquad_nli": 0.7147222222222223,
46
  "hatebr_offensive": 0.8653967318817455,
47
  "portuguese_hate_speech": 0.6322323153577603,
48
+ "tweetsentbr": 0.6252347993335684
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6263121063680895,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7147222222222223,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8653967318817455,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6322323153577603,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.6252347993335684
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6263121063680895,
 
150
  "main_score": 0.6322323153577603
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.6252347993335684,
154
  "acc,all": 0.6925373134328359,
155
+ "main_score": 0.6252347993335684
156
  }
157
  },
158
  "config_tasks": {
JJhooww/Mistral_Relora_Step2k/raw_2024-03-09T08-42-21.029909/results.json CHANGED
@@ -1,1324 +1,1324 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.9113496854193482,
5
- "acc,all": 0.9113562091503268,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.7074610038971542,
10
- "mse,all": 0.7742524509803921,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.5257301808066759,
15
- "acc,exam_id__USP_2019": 0.5,
16
- "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274,
17
- "acc,exam_id__UNICAMP_2020": 0.4909090909090909,
18
- "acc,exam_id__UNICAMP_2024": 0.4666666666666667,
19
- "acc,exam_id__UNICAMP_2019": 0.58,
20
- "acc,exam_id__USP_2020": 0.5714285714285714,
21
- "acc,exam_id__UNICAMP_2022": 0.6153846153846154,
22
- "acc,exam_id__USP_2024": 0.7073170731707317,
23
- "acc,exam_id__UNICAMP_2018": 0.4074074074074074,
24
- "acc,exam_id__USP_2022": 0.4489795918367347,
25
- "acc,exam_id__UNICAMP_2023": 0.627906976744186,
26
- "acc,exam_id__USP_2021": 0.4807692307692308,
27
- "acc,exam_id__USP_2023": 0.5909090909090909,
28
- "acc,exam_id__USP_2018": 0.4444444444444444,
29
- "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.615815255423373,
35
- "acc,exam_id__2011": 0.6837606837606838,
36
- "acc,exam_id__2014": 0.6513761467889908,
37
- "acc,exam_id__2012": 0.5948275862068966,
38
- "acc,exam_id__2016_2": 0.6097560975609756,
39
- "acc,exam_id__2013": 0.6296296296296297,
40
- "acc,exam_id__2009": 0.6,
41
- "acc,exam_id__2022": 0.5939849624060151,
42
- "acc,exam_id__2010": 0.5897435897435898,
43
- "acc,exam_id__2017": 0.6206896551724138,
44
- "acc,exam_id__2023": 0.6592592592592592,
45
- "acc,exam_id__2015": 0.5630252100840336,
46
- "acc,exam_id__2016": 0.5950413223140496
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.6526577185427341,
50
- "acc,all": 0.816923076923077,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.8133973664850924,
56
- "acc,all": 0.8157142857142857
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.3981776765375854,
60
- "acc,exam_id__2016-20a": 0.325,
61
- "acc,exam_id__2012-07": 0.4125,
62
- "acc,exam_id__2015-16": 0.3375,
63
- "acc,exam_id__2010-01": 0.3176470588235294,
64
- "acc,exam_id__2016-21": 0.3625,
65
- "acc,exam_id__2017-23": 0.3625,
66
- "acc,exam_id__2012-08": 0.3875,
67
- "acc,exam_id__2012-06": 0.5125,
68
- "acc,exam_id__2014-15": 0.46153846153846156,
69
- "acc,exam_id__2011-04": 0.3125,
70
- "acc,exam_id__2014-13": 0.3375,
71
- "acc,exam_id__2013-12": 0.425,
72
- "acc,exam_id__2014-14": 0.5125,
73
- "acc,exam_id__2017-22": 0.5375,
74
- "acc,exam_id__2013-10": 0.35,
75
- "acc,exam_id__2018-25": 0.3875,
76
- "acc,exam_id__2012-06a": 0.425,
77
- "acc,exam_id__2016-19": 0.47435897435897434,
78
- "acc,exam_id__2017-24": 0.3875,
79
- "acc,exam_id__2011-05": 0.35,
80
- "acc,exam_id__2015-17": 0.44871794871794873,
81
- "acc,exam_id__2011-03": 0.35353535353535354,
82
- "acc,exam_id__2010-02": 0.42,
83
- "acc,exam_id__2013-11": 0.45,
84
- "acc,exam_id__2012-09": 0.35064935064935066,
85
- "acc,exam_id__2016-20": 0.4,
86
- "acc,exam_id__2015-18": 0.3625,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.6536416538696902,
92
- "acc,all": 0.6686251468860165
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.5193585604823832,
96
- "acc,all": 0.7074626865671642,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
208
  },
209
- {
210
- "function": "take_first"
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7ff1779f53a0>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7ff1779f4d60>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
414
  },
415
- {
416
- "function": "take_first"
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7ff1779f4fe0>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7ff1779f5580>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
503
  },
504
- {
505
- "function": "take_first"
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7ff1779f5800>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
624
  },
625
- {
626
- "function": "take_first"
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7ff1779f4720>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
839
  },
840
- {
841
- "function": "take_first"
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7ff1779f49a0>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
963
  },
964
- {
965
- "function": "take_first"
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia-temp/tweetsentbr",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "id_sampler",
990
- "sampler_config": {
991
- "id_list": [
992
- "862006098672459776",
993
- "861612241703063552",
994
- "861833257087848448",
995
- "861283345476571138",
996
- "861283000335695873",
997
- "862139461274152962",
998
- "862139468702265344",
999
- "862006107702734848",
1000
- "862004354458537984",
1001
- "861833322925883392",
1002
- "861603063190171648",
1003
- "862139462716989440",
1004
- "862005877355810818",
1005
- "861751885862244353",
1006
- "862045180261695489",
1007
- "862004252499226630",
1008
- "862023970828292097",
1009
- "862041752127107074",
1010
- "862034961863503872",
1011
- "861293756548608001",
1012
- "861993527575695360",
1013
- "862003099355021315",
1014
- "862002404086206467",
1015
- "861282989602463744",
1016
- "862139454399668229",
1017
- "862139463769743361",
1018
- "862054906689138688",
1019
- "862139446535360513",
1020
- "861997363744911361",
1021
- "862057988898648065",
1022
- "861329080083521536",
1023
- "861286289034838016",
1024
- "861833050526806017",
1025
- "861300658565255169",
1026
- "861989003821813760",
1027
- "861682750398631938",
1028
- "861283275716907008",
1029
- "861283402523267072",
1030
- "861873108147466240",
1031
- "862139462138171392",
1032
- "861284090271715333",
1033
- "862139446149427201",
1034
- "861629109331525633",
1035
- "861721698609098753",
1036
- "862139453124612096",
1037
- "861283339482914816",
1038
- "861282466291748867",
1039
- "862055346759749632",
1040
- "862003019860389891",
1041
- "862140698346344449",
1042
- "862084376280092672",
1043
- "862003058708017152",
1044
- "862000677345787904",
1045
- "862029129310502913",
1046
- "862005822376882178",
1047
- "861969836297134085",
1048
- "861302955361927168",
1049
- "862064949451005953",
1050
- "861282589541355520",
1051
- "862005476858486784",
1052
- "862004684411850757",
1053
- "862139471101349890",
1054
- "862139467146170368",
1055
- "862139475098558465",
1056
- "862140706550403072",
1057
- "861282777001537536",
1058
- "862003184147079169",
1059
- "861283410656059394",
1060
- "861283417857691649",
1061
- "861888778922856448",
1062
- "861655860812099585",
1063
- "861834248063504384",
1064
- "862005210935382017",
1065
- "861282716930760704",
1066
- "861287082433622022"
1067
- ],
1068
- "id_column": "id"
1069
- }
1070
- },
1071
- "num_fewshot": 25,
1072
- "metric_list": [
1073
- {
1074
- "metric": "f1_macro",
1075
- "aggregation": "f1_macro",
1076
- "higher_is_better": true
1077
  },
1078
- {
1079
- "metric": "acc",
1080
- "aggregation": "acc",
1081
- "higher_is_better": true
1082
- }
1083
- ],
1084
- "output_type": "generate_until",
1085
- "generation_kwargs": {
1086
- "max_gen_toks": 32,
1087
- "do_sample": false,
1088
- "temperature": 0.0,
1089
- "top_k": null,
1090
- "top_p": null,
1091
- "until": [
1092
- "\n\n"
1093
- ]
1094
- },
1095
- "repeats": 1,
1096
- "filter_list": [
1097
- {
1098
- "name": "all",
1099
- "filter": [
1100
- {
1101
- "function": "find_similar_label",
1102
- "labels": [
1103
- "Positivo",
1104
- "Neutro",
1105
- "Negativo"
1106
- ]
1107
  },
1108
- {
1109
- "function": "take_first"
1110
  }
1111
- ]
1112
  }
1113
- ],
1114
- "should_decontaminate": false,
1115
- "metadata": {
1116
- "version": 1.0
1117
- }
1118
- }
1119
- },
1120
- "versions": {
1121
- "assin2_rte": 1.1,
1122
- "assin2_sts": 1.1,
1123
- "bluex": 1.1,
1124
- "enem_challenge": 1.1,
1125
- "faquad_nli": 1.1,
1126
- "hatebr_offensive": 1.0,
1127
- "oab_exams": 1.5,
1128
- "portuguese_hate_speech": 1.0,
1129
- "tweetsentbr": 1.0
1130
- },
1131
- "n-shot": {
1132
- "assin2_rte": 15,
1133
- "assin2_sts": 15,
1134
- "bluex": 3,
1135
- "enem_challenge": 3,
1136
- "faquad_nli": 15,
1137
- "hatebr_offensive": 25,
1138
- "oab_exams": 3,
1139
- "portuguese_hate_speech": 25,
1140
- "tweetsentbr": 25
1141
- },
1142
- "model_meta": {
1143
- "truncated": 3,
1144
- "non_truncated": 14147,
1145
- "padded": 0,
1146
- "non_padded": 14150,
1147
- "fewshots_truncated": 3,
1148
- "has_chat_template": true,
1149
- "chat_type": "user_assistant",
1150
- "n_gpus": 1,
1151
- "accelerate_num_process": null,
1152
- "model_sha": "3bd728425d680d1f0472b1cdc553f036bfc90c48",
1153
- "model_dtype": "torch.float16",
1154
- "model_memory_footprint": 15020343296,
1155
- "model_num_parameters": 7241732096,
1156
- "model_is_loaded_in_4bit": null,
1157
- "model_is_loaded_in_8bit": null,
1158
- "model_is_quantized": null,
1159
- "model_device": "cuda:1",
1160
- "batch_size": 8,
1161
- "max_length": 2560,
1162
- "max_ctx_length": 2528,
1163
- "max_gen_toks": 32
1164
- },
1165
- "task_model_meta": {
1166
- "assin2_rte": {
1167
- "sample_size": 2448,
1168
- "truncated": 0,
1169
- "non_truncated": 2448,
1170
- "padded": 0,
1171
- "non_padded": 2448,
1172
- "fewshots_truncated": 0,
1173
- "mean_seq_length": 1451.7455065359477,
1174
- "min_seq_length": 1428,
1175
- "max_seq_length": 1518,
1176
- "max_ctx_length": 2528,
1177
- "max_gen_toks": 32,
1178
- "mean_original_fewshots_size": 15.0,
1179
- "mean_effective_fewshot_size": 15.0
1180
- },
1181
- "assin2_sts": {
1182
- "sample_size": 2448,
1183
- "truncated": 0,
1184
- "non_truncated": 2448,
1185
- "padded": 0,
1186
- "non_padded": 2448,
1187
- "fewshots_truncated": 0,
1188
- "mean_seq_length": 1675.7455065359477,
1189
- "min_seq_length": 1652,
1190
- "max_seq_length": 1742,
1191
- "max_ctx_length": 2528,
1192
- "max_gen_toks": 32,
1193
- "mean_original_fewshots_size": 15.0,
1194
- "mean_effective_fewshot_size": 15.0
1195
- },
1196
- "bluex": {
1197
- "sample_size": 719,
1198
- "truncated": 1,
1199
- "non_truncated": 718,
1200
- "padded": 0,
1201
- "non_padded": 719,
1202
- "fewshots_truncated": 1,
1203
- "mean_seq_length": 1744.9262865090404,
1204
- "min_seq_length": 1368,
1205
- "max_seq_length": 2545,
1206
- "max_ctx_length": 2528,
1207
- "max_gen_toks": 32,
1208
- "mean_original_fewshots_size": 3.0,
1209
- "mean_effective_fewshot_size": 2.998609179415855
1210
  },
1211
- "enem_challenge": {
1212
- "sample_size": 1429,
1213
- "truncated": 2,
1214
- "non_truncated": 1427,
1215
- "padded": 0,
1216
- "non_padded": 1429,
1217
- "fewshots_truncated": 2,
1218
- "mean_seq_length": 1645.039188243527,
1219
- "min_seq_length": 1379,
1220
- "max_seq_length": 2643,
1221
- "max_ctx_length": 2528,
1222
- "max_gen_toks": 32,
1223
- "mean_original_fewshots_size": 3.0,
1224
- "mean_effective_fewshot_size": 2.998600419874038
1225
  },
1226
- "faquad_nli": {
1227
- "sample_size": 650,
1228
- "truncated": 0,
1229
- "non_truncated": 650,
1230
- "padded": 0,
1231
- "non_padded": 650,
1232
- "fewshots_truncated": 0,
1233
- "mean_seq_length": 1691.9876923076922,
1234
- "min_seq_length": 1636,
1235
- "max_seq_length": 1812,
1236
- "max_ctx_length": 2528,
1237
- "max_gen_toks": 32,
1238
- "mean_original_fewshots_size": 15.0,
1239
- "mean_effective_fewshot_size": 15.0
1240
  },
1241
- "hatebr_offensive": {
1242
- "sample_size": 1400,
1243
- "truncated": 0,
1244
- "non_truncated": 1400,
1245
- "padded": 0,
1246
- "non_padded": 1400,
1247
- "fewshots_truncated": 0,
1248
- "mean_seq_length": 1462.3878571428572,
1249
- "min_seq_length": 1439,
1250
- "max_seq_length": 1713,
1251
- "max_ctx_length": 2528,
1252
- "max_gen_toks": 32,
1253
- "mean_original_fewshots_size": 25.0,
1254
- "mean_effective_fewshot_size": 25.0
1255
  },
1256
- "oab_exams": {
1257
- "sample_size": 2195,
1258
- "truncated": 0,
1259
- "non_truncated": 2195,
1260
- "padded": 0,
1261
- "non_padded": 2195,
1262
- "fewshots_truncated": 0,
1263
- "mean_seq_length": 1390.764464692483,
1264
- "min_seq_length": 1124,
1265
- "max_seq_length": 1893,
1266
- "max_ctx_length": 2528,
1267
- "max_gen_toks": 32,
1268
- "mean_original_fewshots_size": 3.0,
1269
- "mean_effective_fewshot_size": 3.0
1270
  },
1271
- "portuguese_hate_speech": {
1272
- "sample_size": 851,
1273
- "truncated": 0,
1274
- "non_truncated": 851,
1275
- "padded": 0,
1276
- "non_padded": 851,
1277
- "fewshots_truncated": 0,
1278
- "mean_seq_length": 1963.3360752056403,
1279
- "min_seq_length": 1928,
1280
- "max_seq_length": 2002,
1281
- "max_ctx_length": 2528,
1282
- "max_gen_toks": 32,
1283
- "mean_original_fewshots_size": 25.0,
1284
- "mean_effective_fewshot_size": 25.0
1285
  },
1286
- "tweetsentbr": {
1287
- "sample_size": 2010,
1288
- "truncated": 0,
1289
- "non_truncated": 2010,
1290
- "padded": 0,
1291
- "non_padded": 2010,
1292
- "fewshots_truncated": 0,
1293
- "mean_seq_length": 1709.2492537313433,
1294
- "min_seq_length": 1688,
1295
- "max_seq_length": 1804,
1296
- "max_ctx_length": 2528,
1297
- "max_gen_toks": 32,
1298
- "mean_original_fewshots_size": 25.0,
1299
- "mean_effective_fewshot_size": 25.0
1300
- }
1301
- },
1302
- "config": {
1303
- "model": "huggingface",
1304
- "model_args": "pretrained=JJhooww/Mistral_Relora_Step2k,dtype=float16,device=cuda:1,revision=main,trust_remote_code=True,starting_max_length=2560",
1305
- "batch_size": "auto",
1306
- "batch_sizes": [],
1307
- "device": null,
1308
- "use_cache": null,
1309
- "limit": [
1310
- null,
1311
- null,
1312
- null,
1313
- null,
1314
- null,
1315
- null,
1316
- null,
1317
- null,
1318
- null
1319
- ],
1320
- "bootstrap_iters": 0,
1321
- "gen_kwargs": null
1322
- },
1323
- "git_hash": null
1324
  }
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.9113496854193482,
5
+ "acc,all": 0.9113562091503268,
6
+ "alias": "assin2_rte"
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.7074610038971542,
10
+ "mse,all": 0.7742524509803921,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.5257301808066759,
15
+ "acc,exam_id__USP_2019": 0.5,
16
+ "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274,
17
+ "acc,exam_id__UNICAMP_2020": 0.4909090909090909,
18
+ "acc,exam_id__UNICAMP_2024": 0.4666666666666667,
19
+ "acc,exam_id__UNICAMP_2019": 0.58,
20
+ "acc,exam_id__USP_2020": 0.5714285714285714,
21
+ "acc,exam_id__UNICAMP_2022": 0.6153846153846154,
22
+ "acc,exam_id__USP_2024": 0.7073170731707317,
23
+ "acc,exam_id__UNICAMP_2018": 0.4074074074074074,
24
+ "acc,exam_id__USP_2022": 0.4489795918367347,
25
+ "acc,exam_id__UNICAMP_2023": 0.627906976744186,
26
+ "acc,exam_id__USP_2021": 0.4807692307692308,
27
+ "acc,exam_id__USP_2023": 0.5909090909090909,
28
+ "acc,exam_id__USP_2018": 0.4444444444444444,
29
+ "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.615815255423373,
35
+ "acc,exam_id__2011": 0.6837606837606838,
36
+ "acc,exam_id__2014": 0.6513761467889908,
37
+ "acc,exam_id__2012": 0.5948275862068966,
38
+ "acc,exam_id__2016_2": 0.6097560975609756,
39
+ "acc,exam_id__2013": 0.6296296296296297,
40
+ "acc,exam_id__2009": 0.6,
41
+ "acc,exam_id__2022": 0.5939849624060151,
42
+ "acc,exam_id__2010": 0.5897435897435898,
43
+ "acc,exam_id__2017": 0.6206896551724138,
44
+ "acc,exam_id__2023": 0.6592592592592592,
45
+ "acc,exam_id__2015": 0.5630252100840336,
46
+ "acc,exam_id__2016": 0.5950413223140496
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.6526577185427341,
50
+ "acc,all": 0.816923076923077,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.8133973664850924,
56
+ "acc,all": 0.8157142857142857
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.3981776765375854,
60
+ "acc,exam_id__2016-20a": 0.325,
61
+ "acc,exam_id__2012-07": 0.4125,
62
+ "acc,exam_id__2015-16": 0.3375,
63
+ "acc,exam_id__2010-01": 0.3176470588235294,
64
+ "acc,exam_id__2016-21": 0.3625,
65
+ "acc,exam_id__2017-23": 0.3625,
66
+ "acc,exam_id__2012-08": 0.3875,
67
+ "acc,exam_id__2012-06": 0.5125,
68
+ "acc,exam_id__2014-15": 0.46153846153846156,
69
+ "acc,exam_id__2011-04": 0.3125,
70
+ "acc,exam_id__2014-13": 0.3375,
71
+ "acc,exam_id__2013-12": 0.425,
72
+ "acc,exam_id__2014-14": 0.5125,
73
+ "acc,exam_id__2017-22": 0.5375,
74
+ "acc,exam_id__2013-10": 0.35,
75
+ "acc,exam_id__2018-25": 0.3875,
76
+ "acc,exam_id__2012-06a": 0.425,
77
+ "acc,exam_id__2016-19": 0.47435897435897434,
78
+ "acc,exam_id__2017-24": 0.3875,
79
+ "acc,exam_id__2011-05": 0.35,
80
+ "acc,exam_id__2015-17": 0.44871794871794873,
81
+ "acc,exam_id__2011-03": 0.35353535353535354,
82
+ "acc,exam_id__2010-02": 0.42,
83
+ "acc,exam_id__2013-11": 0.45,
84
+ "acc,exam_id__2012-09": 0.35064935064935066,
85
+ "acc,exam_id__2016-20": 0.4,
86
+ "acc,exam_id__2015-18": 0.3625,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.6536416538696902,
92
+ "acc,all": 0.6686251468860165
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.6924780806431777,
96
+ "acc,all": 0.7074626865671642,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7ff1779f53a0>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7ff1779f4d60>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7ff1779f4fe0>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7ff1779f5580>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7ff1779f5800>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7ff1779f4720>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7ff1779f49a0>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia-temp/tweetsentbr",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "id_sampler",
990
+ "sampler_config": {
991
+ "id_list": [
992
+ "862006098672459776",
993
+ "861612241703063552",
994
+ "861833257087848448",
995
+ "861283345476571138",
996
+ "861283000335695873",
997
+ "862139461274152962",
998
+ "862139468702265344",
999
+ "862006107702734848",
1000
+ "862004354458537984",
1001
+ "861833322925883392",
1002
+ "861603063190171648",
1003
+ "862139462716989440",
1004
+ "862005877355810818",
1005
+ "861751885862244353",
1006
+ "862045180261695489",
1007
+ "862004252499226630",
1008
+ "862023970828292097",
1009
+ "862041752127107074",
1010
+ "862034961863503872",
1011
+ "861293756548608001",
1012
+ "861993527575695360",
1013
+ "862003099355021315",
1014
+ "862002404086206467",
1015
+ "861282989602463744",
1016
+ "862139454399668229",
1017
+ "862139463769743361",
1018
+ "862054906689138688",
1019
+ "862139446535360513",
1020
+ "861997363744911361",
1021
+ "862057988898648065",
1022
+ "861329080083521536",
1023
+ "861286289034838016",
1024
+ "861833050526806017",
1025
+ "861300658565255169",
1026
+ "861989003821813760",
1027
+ "861682750398631938",
1028
+ "861283275716907008",
1029
+ "861283402523267072",
1030
+ "861873108147466240",
1031
+ "862139462138171392",
1032
+ "861284090271715333",
1033
+ "862139446149427201",
1034
+ "861629109331525633",
1035
+ "861721698609098753",
1036
+ "862139453124612096",
1037
+ "861283339482914816",
1038
+ "861282466291748867",
1039
+ "862055346759749632",
1040
+ "862003019860389891",
1041
+ "862140698346344449",
1042
+ "862084376280092672",
1043
+ "862003058708017152",
1044
+ "862000677345787904",
1045
+ "862029129310502913",
1046
+ "862005822376882178",
1047
+ "861969836297134085",
1048
+ "861302955361927168",
1049
+ "862064949451005953",
1050
+ "861282589541355520",
1051
+ "862005476858486784",
1052
+ "862004684411850757",
1053
+ "862139471101349890",
1054
+ "862139467146170368",
1055
+ "862139475098558465",
1056
+ "862140706550403072",
1057
+ "861282777001537536",
1058
+ "862003184147079169",
1059
+ "861283410656059394",
1060
+ "861283417857691649",
1061
+ "861888778922856448",
1062
+ "861655860812099585",
1063
+ "861834248063504384",
1064
+ "862005210935382017",
1065
+ "861282716930760704",
1066
+ "861287082433622022"
1067
+ ],
1068
+ "id_column": "id"
1069
+ }
1070
+ },
1071
+ "num_fewshot": 25,
1072
+ "metric_list": [
1073
+ {
1074
+ "metric": "f1_macro",
1075
+ "aggregation": "f1_macro",
1076
+ "higher_is_better": true
1077
+ },
1078
+ {
1079
+ "metric": "acc",
1080
+ "aggregation": "acc",
1081
+ "higher_is_better": true
1082
+ }
1083
+ ],
1084
+ "output_type": "generate_until",
1085
+ "generation_kwargs": {
1086
+ "max_gen_toks": 32,
1087
+ "do_sample": false,
1088
+ "temperature": 0.0,
1089
+ "top_k": null,
1090
+ "top_p": null,
1091
+ "until": [
1092
+ "\n\n"
1093
+ ]
1094
  },
1095
+ "repeats": 1,
1096
+ "filter_list": [
1097
+ {
1098
+ "name": "all",
1099
+ "filter": [
1100
+ {
1101
+ "function": "find_similar_label",
1102
+ "labels": [
1103
+ "Positivo",
1104
+ "Neutro",
1105
+ "Negativo"
1106
+ ]
1107
+ },
1108
+ {
1109
+ "function": "take_first"
1110
+ }
1111
+ ]
1112
+ }
1113
+ ],
1114
+ "should_decontaminate": false,
1115
+ "metadata": {
1116
+ "version": 1.0
1117
  }
1118
  }
1119
  },
1120
+ "versions": {
1121
+ "assin2_rte": 1.1,
1122
+ "assin2_sts": 1.1,
1123
+ "bluex": 1.1,
1124
+ "enem_challenge": 1.1,
1125
+ "faquad_nli": 1.1,
1126
+ "hatebr_offensive": 1.0,
1127
+ "oab_exams": 1.5,
1128
+ "portuguese_hate_speech": 1.0,
1129
+ "tweetsentbr": 1.0
1130
  },
1131
+ "n-shot": {
1132
+ "assin2_rte": 15,
1133
+ "assin2_sts": 15,
1134
+ "bluex": 3,
1135
+ "enem_challenge": 3,
1136
+ "faquad_nli": 15,
1137
+ "hatebr_offensive": 25,
1138
+ "oab_exams": 3,
1139
+ "portuguese_hate_speech": 25,
1140
+ "tweetsentbr": 25
1141
  },
1142
+ "model_meta": {
1143
+ "truncated": 3,
1144
+ "non_truncated": 14147,
1145
+ "padded": 0,
1146
+ "non_padded": 14150,
1147
+ "fewshots_truncated": 3,
1148
+ "has_chat_template": true,
1149
+ "chat_type": "user_assistant",
1150
+ "n_gpus": 1,
1151
+ "accelerate_num_process": null,
1152
+ "model_sha": "3bd728425d680d1f0472b1cdc553f036bfc90c48",
1153
+ "model_dtype": "torch.float16",
1154
+ "model_memory_footprint": 15020343296,
1155
+ "model_num_parameters": 7241732096,
1156
+ "model_is_loaded_in_4bit": null,
1157
+ "model_is_loaded_in_8bit": null,
1158
+ "model_is_quantized": null,
1159
+ "model_device": "cuda:1",
1160
+ "batch_size": 8,
1161
+ "max_length": 2560,
1162
+ "max_ctx_length": 2528,
1163
+ "max_gen_toks": 32
1164
  },
1165
+ "task_model_meta": {
1166
+ "assin2_rte": {
1167
+ "sample_size": 2448,
1168
+ "truncated": 0,
1169
+ "non_truncated": 2448,
1170
+ "padded": 0,
1171
+ "non_padded": 2448,
1172
+ "fewshots_truncated": 0,
1173
+ "mean_seq_length": 1451.7455065359477,
1174
+ "min_seq_length": 1428,
1175
+ "max_seq_length": 1518,
1176
+ "max_ctx_length": 2528,
1177
+ "max_gen_toks": 32,
1178
+ "mean_original_fewshots_size": 15.0,
1179
+ "mean_effective_fewshot_size": 15.0
1180
+ },
1181
+ "assin2_sts": {
1182
+ "sample_size": 2448,
1183
+ "truncated": 0,
1184
+ "non_truncated": 2448,
1185
+ "padded": 0,
1186
+ "non_padded": 2448,
1187
+ "fewshots_truncated": 0,
1188
+ "mean_seq_length": 1675.7455065359477,
1189
+ "min_seq_length": 1652,
1190
+ "max_seq_length": 1742,
1191
+ "max_ctx_length": 2528,
1192
+ "max_gen_toks": 32,
1193
+ "mean_original_fewshots_size": 15.0,
1194
+ "mean_effective_fewshot_size": 15.0
1195
+ },
1196
+ "bluex": {
1197
+ "sample_size": 719,
1198
+ "truncated": 1,
1199
+ "non_truncated": 718,
1200
+ "padded": 0,
1201
+ "non_padded": 719,
1202
+ "fewshots_truncated": 1,
1203
+ "mean_seq_length": 1744.9262865090404,
1204
+ "min_seq_length": 1368,
1205
+ "max_seq_length": 2545,
1206
+ "max_ctx_length": 2528,
1207
+ "max_gen_toks": 32,
1208
+ "mean_original_fewshots_size": 3.0,
1209
+ "mean_effective_fewshot_size": 2.998609179415855
1210
+ },
1211
+ "enem_challenge": {
1212
+ "sample_size": 1429,
1213
+ "truncated": 2,
1214
+ "non_truncated": 1427,
1215
+ "padded": 0,
1216
+ "non_padded": 1429,
1217
+ "fewshots_truncated": 2,
1218
+ "mean_seq_length": 1645.039188243527,
1219
+ "min_seq_length": 1379,
1220
+ "max_seq_length": 2643,
1221
+ "max_ctx_length": 2528,
1222
+ "max_gen_toks": 32,
1223
+ "mean_original_fewshots_size": 3.0,
1224
+ "mean_effective_fewshot_size": 2.998600419874038
1225
+ },
1226
+ "faquad_nli": {
1227
+ "sample_size": 650,
1228
+ "truncated": 0,
1229
+ "non_truncated": 650,
1230
+ "padded": 0,
1231
+ "non_padded": 650,
1232
+ "fewshots_truncated": 0,
1233
+ "mean_seq_length": 1691.9876923076922,
1234
+ "min_seq_length": 1636,
1235
+ "max_seq_length": 1812,
1236
+ "max_ctx_length": 2528,
1237
+ "max_gen_toks": 32,
1238
+ "mean_original_fewshots_size": 15.0,
1239
+ "mean_effective_fewshot_size": 15.0
1240
+ },
1241
+ "hatebr_offensive": {
1242
+ "sample_size": 1400,
1243
+ "truncated": 0,
1244
+ "non_truncated": 1400,
1245
+ "padded": 0,
1246
+ "non_padded": 1400,
1247
+ "fewshots_truncated": 0,
1248
+ "mean_seq_length": 1462.3878571428572,
1249
+ "min_seq_length": 1439,
1250
+ "max_seq_length": 1713,
1251
+ "max_ctx_length": 2528,
1252
+ "max_gen_toks": 32,
1253
+ "mean_original_fewshots_size": 25.0,
1254
+ "mean_effective_fewshot_size": 25.0
1255
+ },
1256
+ "oab_exams": {
1257
+ "sample_size": 2195,
1258
+ "truncated": 0,
1259
+ "non_truncated": 2195,
1260
+ "padded": 0,
1261
+ "non_padded": 2195,
1262
+ "fewshots_truncated": 0,
1263
+ "mean_seq_length": 1390.764464692483,
1264
+ "min_seq_length": 1124,
1265
+ "max_seq_length": 1893,
1266
+ "max_ctx_length": 2528,
1267
+ "max_gen_toks": 32,
1268
+ "mean_original_fewshots_size": 3.0,
1269
+ "mean_effective_fewshot_size": 3.0
1270
+ },
1271
+ "portuguese_hate_speech": {
1272
+ "sample_size": 851,
1273
+ "truncated": 0,
1274
+ "non_truncated": 851,
1275
+ "padded": 0,
1276
+ "non_padded": 851,
1277
+ "fewshots_truncated": 0,
1278
+ "mean_seq_length": 1963.3360752056403,
1279
+ "min_seq_length": 1928,
1280
+ "max_seq_length": 2002,
1281
+ "max_ctx_length": 2528,
1282
+ "max_gen_toks": 32,
1283
+ "mean_original_fewshots_size": 25.0,
1284
+ "mean_effective_fewshot_size": 25.0
1285
+ },
1286
+ "tweetsentbr": {
1287
+ "sample_size": 2010,
1288
+ "truncated": 0,
1289
+ "non_truncated": 2010,
1290
+ "padded": 0,
1291
+ "non_padded": 2010,
1292
+ "fewshots_truncated": 0,
1293
+ "mean_seq_length": 1709.2492537313433,
1294
+ "min_seq_length": 1688,
1295
+ "max_seq_length": 1804,
1296
+ "max_ctx_length": 2528,
1297
+ "max_gen_toks": 32,
1298
+ "mean_original_fewshots_size": 25.0,
1299
+ "mean_effective_fewshot_size": 25.0
1300
+ }
1301
  },
1302
+ "config": {
1303
+ "model": "huggingface",
1304
+ "model_args": "pretrained=JJhooww/Mistral_Relora_Step2k,dtype=float16,device=cuda:1,revision=main,trust_remote_code=True,starting_max_length=2560",
1305
+ "batch_size": "auto",
1306
+ "batch_sizes": [],
1307
+ "device": null,
1308
+ "use_cache": null,
1309
+ "limit": [
1310
+ null,
1311
+ null,
1312
+ null,
1313
+ null,
1314
+ null,
1315
+ null,
1316
+ null,
1317
+ null,
1318
+ null
1319
+ ],
1320
+ "bootstrap_iters": 0,
1321
+ "gen_kwargs": null
1322
  },
1323
+ "git_hash": null
1324
  }
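Note on the configs above: each multiple-choice task attaches a cascade of "regex_patterns" that extracts a single answer letter from the model generation, and the classification tasks aggregate the extracted labels with "f1_macro" / "acc". The sketch below is illustrative only and is not the evaluation harness's code: it assumes scikit-learn is available, the helper names (extract_choice, macro_f1) are hypothetical, and mapping unparsable generations to an "invalid" sentinel is an assumption rather than documented behaviour.

# Illustrative sketch, not the leaderboard's implementation.
# Shows how a "regex_patterns" answer filter and an "f1_macro"
# aggregation of the kind configured above could fit together.
import re
from sklearn.metrics import f1_score

# Mirrors the "regex_patterns" list from the bluex/enem_challenge configs.
PATTERNS = [
    r"(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|"
    r"[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\b",
    r"\b([ABCDE])\.",
    r"\b([ABCDE]) ?[.):-]",
    r"\b([ABCDE])$",
    r"\b([ABCDE])\b",
]

def extract_choice(generation):
    """Return the first answer letter matched by the pattern cascade, or None."""
    for pattern in PATTERNS:
        match = re.search(pattern, generation)
        if match:
            return match.group(1)
    return None

def macro_f1(references, predictions):
    """Macro-averaged F1 restricted to the labels that occur in the references."""
    labels = sorted(set(references))
    return f1_score(references, predictions, labels=labels,
                    average="macro", zero_division=0)

# Unparsable generations are mapped to a sentinel here (an assumption),
# so they count as errors instead of being silently dropped.
refs = ["A", "B", "C"]
gens = ["Resposta correta: A", "Letra B.", "nenhuma das anteriores"]
preds = [extract_choice(g) or "invalid" for g in gens]
print(round(macro_f1(refs, preds), 4))  # 0.6667

The "find_similar_label" filters used by the Sim/Não and sentiment tasks play the same role as extract_choice above, except that they map free-form generations onto the closest configured label before aggregation.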
JJhooww/Mistral_Relora_Step2k/raw_2024-04-20T03-09-26.234801/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.9121625173669783,
5
- "acc,all": 0.9121732026143791,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.7065946896645577,
10
- "mse,all": 0.7988970588235295,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.5159944367176634,
15
- "acc,exam_id__USP_2023": 0.5909090909090909,
16
- "acc,exam_id__UNICAMP_2019": 0.52,
17
- "acc,exam_id__UNICAMP_2023": 0.627906976744186,
18
- "acc,exam_id__UNICAMP_2022": 0.6153846153846154,
19
- "acc,exam_id__UNICAMP_2024": 0.4666666666666667,
20
- "acc,exam_id__USP_2018": 0.4444444444444444,
21
- "acc,exam_id__USP_2022": 0.4489795918367347,
22
- "acc,exam_id__USP_2020": 0.5357142857142857,
23
- "acc,exam_id__USP_2019": 0.525,
24
- "acc,exam_id__USP_2024": 0.7073170731707317,
25
- "acc,exam_id__UNICAMP_2018": 0.3888888888888889,
26
- "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274,
27
- "acc,exam_id__UNICAMP_2020": 0.45454545454545453,
28
- "acc,exam_id__USP_2021": 0.4807692307692308,
29
- "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.6179146256123164,
35
- "acc,exam_id__2014": 0.6513761467889908,
36
- "acc,exam_id__2017": 0.6206896551724138,
37
- "acc,exam_id__2009": 0.6086956521739131,
38
- "acc,exam_id__2016": 0.6033057851239669,
39
- "acc,exam_id__2013": 0.6203703703703703,
40
- "acc,exam_id__2015": 0.5630252100840336,
41
- "acc,exam_id__2016_2": 0.6178861788617886,
42
- "acc,exam_id__2023": 0.6518518518518519,
43
- "acc,exam_id__2011": 0.6837606837606838,
44
- "acc,exam_id__2022": 0.5939849624060151,
45
- "acc,exam_id__2012": 0.6120689655172413,
46
- "acc,exam_id__2010": 0.5897435897435898
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.6466313961043266,
50
- "acc,all": 0.8107692307692308,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.8143254279726638,
56
- "acc,all": 0.8164285714285714
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.39635535307517084,
60
- "acc,exam_id__2012-06a": 0.425,
61
- "acc,exam_id__2013-10": 0.35,
62
- "acc,exam_id__2010-02": 0.42,
63
- "acc,exam_id__2016-21": 0.35,
64
- "acc,exam_id__2014-13": 0.35,
65
- "acc,exam_id__2014-15": 0.46153846153846156,
66
- "acc,exam_id__2017-24": 0.3625,
67
- "acc,exam_id__2016-20": 0.4,
68
- "acc,exam_id__2018-25": 0.3875,
69
- "acc,exam_id__2010-01": 0.3176470588235294,
70
- "acc,exam_id__2015-16": 0.3375,
71
- "acc,exam_id__2016-20a": 0.325,
72
- "acc,exam_id__2011-05": 0.35,
73
- "acc,exam_id__2013-11": 0.45,
74
- "acc,exam_id__2013-12": 0.425,
75
- "acc,exam_id__2014-14": 0.5,
76
- "acc,exam_id__2016-19": 0.47435897435897434,
77
- "acc,exam_id__2015-18": 0.375,
78
- "acc,exam_id__2017-23": 0.3625,
79
- "acc,exam_id__2015-17": 0.44871794871794873,
80
- "acc,exam_id__2011-04": 0.3375,
81
- "acc,exam_id__2011-03": 0.3434343434343434,
82
- "acc,exam_id__2012-06": 0.5,
83
- "acc,exam_id__2012-08": 0.3625,
84
- "acc,exam_id__2017-22": 0.5375,
85
- "acc,exam_id__2012-07": 0.4125,
86
- "acc,exam_id__2012-09": 0.35064935064935066,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.652940879778074,
92
- "acc,all": 0.6674500587544065
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.5167597069914197,
96
- "acc,all": 0.6950248756218905,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
208
  },
209
- {
210
- "function": "take_first"
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7ff44266dee0>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7ff44266d8a0>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
414
  },
415
- {
416
- "function": "take_first"
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7ff44266db20>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7ff44266e0c0>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
503
  },
504
- {
505
- "function": "take_first"
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7ff44266e340>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
624
  },
625
- {
626
- "function": "take_first"
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7ff44266d260>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
839
  },
840
- {
841
- "function": "take_first"
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7ff44266d4e0>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
963
  },
964
- {
965
- "function": "take_first"
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
  },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
- },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
  },
1028
- {
1029
- "function": "take_first"
1030
  }
1031
- ]
1032
  }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
- }
1038
- }
1039
- },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
1050
- },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
1061
- },
1062
- "model_meta": {
1063
- "truncated": 3,
1064
- "non_truncated": 14147,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 3,
1068
- "has_chat_template": true,
1069
- "chat_type": "user_assistant",
1070
- "n_gpus": 1,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "3c5ea0dafa4d1019739691987b88f38dae28eba1",
1073
- "model_dtype": "torch.bfloat16",
1074
- "model_memory_footprint": 15020343296,
1075
- "model_num_parameters": 7241732096,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 64,
1081
- "max_length": 2560,
1082
- "max_ctx_length": 2528,
1083
- "max_gen_toks": 32
1084
- },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1451.7455065359477,
1094
- "min_seq_length": 1428,
1095
- "max_seq_length": 1518,
1096
- "max_ctx_length": 2528,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1675.7455065359477,
1109
- "min_seq_length": 1652,
1110
- "max_seq_length": 1742,
1111
- "max_ctx_length": 2528,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
  },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 1,
1119
- "non_truncated": 718,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 1,
1123
- "mean_seq_length": 1744.9262865090404,
1124
- "min_seq_length": 1368,
1125
- "max_seq_length": 2545,
1126
- "max_ctx_length": 2528,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 2.998609179415855
1130
  },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 2,
1134
- "non_truncated": 1427,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 2,
1138
- "mean_seq_length": 1645.039188243527,
1139
- "min_seq_length": 1379,
1140
- "max_seq_length": 2643,
1141
- "max_ctx_length": 2528,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 2.998600419874038
1145
  },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1691.9876923076922,
1154
- "min_seq_length": 1636,
1155
- "max_seq_length": 1812,
1156
- "max_ctx_length": 2528,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
1160
  },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1462.3878571428572,
1169
- "min_seq_length": 1439,
1170
- "max_seq_length": 1713,
1171
- "max_ctx_length": 2528,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 0,
1179
- "non_truncated": 2195,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 0,
1183
- "mean_seq_length": 1390.764464692483,
1184
- "min_seq_length": 1124,
1185
- "max_seq_length": 1893,
1186
- "max_ctx_length": 2528,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 3.0
1190
  },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 0,
1194
- "non_truncated": 851,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 0,
1198
- "mean_seq_length": 1963.3360752056403,
1199
- "min_seq_length": 1928,
1200
- "max_seq_length": 2002,
1201
- "max_ctx_length": 2528,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 25.0
1205
  },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1881.2492537313433,
1214
- "min_seq_length": 1860,
1215
- "max_seq_length": 1976,
1216
- "max_ctx_length": 2528,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
- },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=JJhooww/Mistral_Relora_Step2k,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
- },
1243
- "git_hash": "0e4d6ae"
1244
  }
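The JSON block above is the pre-fix version of the raw results file; the corrected version of the same file follows. Its task configs list the answer-extraction filters verbatim, including the find_choices regex patterns used by the multiple-choice tasks (bluex, enem_challenge, oab_exams). As a rough illustration only, here is a minimal sketch of how such a filter can reduce a free-form generation to a single letter, assuming the patterns are tried in order and the first capture wins; the harness's actual filter implementation is not part of this diff, and extract_choice is a hypothetical helper.

import re

# Patterns copied from the "find_choices" filter config above (the A-E variant).
# ASSUMPTION: they are applied in order and the first capturing match is kept.
PATTERNS = [
    r"(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\b",
    r"\b([ABCDE])\.",
    r"\b([ABCDE]) ?[.):-]",
    r"\b([ABCDE])$",
    r"\b([ABCDE])\b",
]

def extract_choice(generation):
    # Hypothetical helper: return the first A-E letter matched by the ordered patterns.
    for pattern in PATTERNS:
        match = re.search(pattern, generation)
        if match:
            return match.group(1)
    return None

print(extract_choice("Resposta correta: B"))            # -> B
print(extract_choice("A alternativa D esta correta."))  # -> D

In the corrected version that follows, the task configs are identical to the block above; the fix only touches aggregated scores.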
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.9121625173669783,
5
+ "acc,all": 0.9121732026143791,
6
+ "alias": "assin2_rte"
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.7065946896645577,
10
+ "mse,all": 0.7988970588235295,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.5159944367176634,
15
+ "acc,exam_id__USP_2023": 0.5909090909090909,
16
+ "acc,exam_id__UNICAMP_2019": 0.52,
17
+ "acc,exam_id__UNICAMP_2023": 0.627906976744186,
18
+ "acc,exam_id__UNICAMP_2022": 0.6153846153846154,
19
+ "acc,exam_id__UNICAMP_2024": 0.4666666666666667,
20
+ "acc,exam_id__USP_2018": 0.4444444444444444,
21
+ "acc,exam_id__USP_2022": 0.4489795918367347,
22
+ "acc,exam_id__USP_2020": 0.5357142857142857,
23
+ "acc,exam_id__USP_2019": 0.525,
24
+ "acc,exam_id__USP_2024": 0.7073170731707317,
25
+ "acc,exam_id__UNICAMP_2018": 0.3888888888888889,
26
+ "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274,
27
+ "acc,exam_id__UNICAMP_2020": 0.45454545454545453,
28
+ "acc,exam_id__USP_2021": 0.4807692307692308,
29
+ "acc,exam_id__UNICAMP_2021_1": 0.5217391304347826,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.6179146256123164,
35
+ "acc,exam_id__2014": 0.6513761467889908,
36
+ "acc,exam_id__2017": 0.6206896551724138,
37
+ "acc,exam_id__2009": 0.6086956521739131,
38
+ "acc,exam_id__2016": 0.6033057851239669,
39
+ "acc,exam_id__2013": 0.6203703703703703,
40
+ "acc,exam_id__2015": 0.5630252100840336,
41
+ "acc,exam_id__2016_2": 0.6178861788617886,
42
+ "acc,exam_id__2023": 0.6518518518518519,
43
+ "acc,exam_id__2011": 0.6837606837606838,
44
+ "acc,exam_id__2022": 0.5939849624060151,
45
+ "acc,exam_id__2012": 0.6120689655172413,
46
+ "acc,exam_id__2010": 0.5897435897435898
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.6466313961043266,
50
+ "acc,all": 0.8107692307692308,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.8143254279726638,
56
+ "acc,all": 0.8164285714285714
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.39635535307517084,
60
+ "acc,exam_id__2012-06a": 0.425,
61
+ "acc,exam_id__2013-10": 0.35,
62
+ "acc,exam_id__2010-02": 0.42,
63
+ "acc,exam_id__2016-21": 0.35,
64
+ "acc,exam_id__2014-13": 0.35,
65
+ "acc,exam_id__2014-15": 0.46153846153846156,
66
+ "acc,exam_id__2017-24": 0.3625,
67
+ "acc,exam_id__2016-20": 0.4,
68
+ "acc,exam_id__2018-25": 0.3875,
69
+ "acc,exam_id__2010-01": 0.3176470588235294,
70
+ "acc,exam_id__2015-16": 0.3375,
71
+ "acc,exam_id__2016-20a": 0.325,
72
+ "acc,exam_id__2011-05": 0.35,
73
+ "acc,exam_id__2013-11": 0.45,
74
+ "acc,exam_id__2013-12": 0.425,
75
+ "acc,exam_id__2014-14": 0.5,
76
+ "acc,exam_id__2016-19": 0.47435897435897434,
77
+ "acc,exam_id__2015-18": 0.375,
78
+ "acc,exam_id__2017-23": 0.3625,
79
+ "acc,exam_id__2015-17": 0.44871794871794873,
80
+ "acc,exam_id__2011-04": 0.3375,
81
+ "acc,exam_id__2011-03": 0.3434343434343434,
82
+ "acc,exam_id__2012-06": 0.5,
83
+ "acc,exam_id__2012-08": 0.3625,
84
+ "acc,exam_id__2017-22": 0.5375,
85
+ "acc,exam_id__2012-07": 0.4125,
86
+ "acc,exam_id__2012-09": 0.35064935064935066,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.652940879778074,
92
+ "acc,all": 0.6674500587544065
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.6890129426552263,
96
+ "acc,all": 0.6950248756218905,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7ff44266dee0>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7ff44266d8a0>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7ff44266db20>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7ff44266e0c0>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7ff44266e340>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7ff44266d260>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7ff44266d4e0>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
990
  },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
+ },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
+ },
1028
+ {
1029
+ "function": "take_first"
1030
+ }
1031
+ ]
1032
+ }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
  }
 
1038
  }
1039
  },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
1050
  },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
1061
  },
1062
+ "model_meta": {
1063
+ "truncated": 3,
1064
+ "non_truncated": 14147,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 3,
1068
+ "has_chat_template": true,
1069
+ "chat_type": "user_assistant",
1070
+ "n_gpus": 1,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "3c5ea0dafa4d1019739691987b88f38dae28eba1",
1073
+ "model_dtype": "torch.bfloat16",
1074
+ "model_memory_footprint": 15020343296,
1075
+ "model_num_parameters": 7241732096,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 64,
1081
+ "max_length": 2560,
1082
+ "max_ctx_length": 2528,
1083
+ "max_gen_toks": 32
1084
  },
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1451.7455065359477,
1094
+ "min_seq_length": 1428,
1095
+ "max_seq_length": 1518,
1096
+ "max_ctx_length": 2528,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
+ },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1675.7455065359477,
1109
+ "min_seq_length": 1652,
1110
+ "max_seq_length": 1742,
1111
+ "max_ctx_length": 2528,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
+ },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 1,
1119
+ "non_truncated": 718,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 1,
1123
+ "mean_seq_length": 1744.9262865090404,
1124
+ "min_seq_length": 1368,
1125
+ "max_seq_length": 2545,
1126
+ "max_ctx_length": 2528,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 2.998609179415855
1130
+ },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 2,
1134
+ "non_truncated": 1427,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 2,
1138
+ "mean_seq_length": 1645.039188243527,
1139
+ "min_seq_length": 1379,
1140
+ "max_seq_length": 2643,
1141
+ "max_ctx_length": 2528,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 2.998600419874038
1145
+ },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1691.9876923076922,
1154
+ "min_seq_length": 1636,
1155
+ "max_seq_length": 1812,
1156
+ "max_ctx_length": 2528,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1462.3878571428572,
1169
+ "min_seq_length": 1439,
1170
+ "max_seq_length": 1713,
1171
+ "max_ctx_length": 2528,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 0,
1179
+ "non_truncated": 2195,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 0,
1183
+ "mean_seq_length": 1390.764464692483,
1184
+ "min_seq_length": 1124,
1185
+ "max_seq_length": 1893,
1186
+ "max_ctx_length": 2528,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 3.0
1190
+ },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 0,
1194
+ "non_truncated": 851,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 0,
1198
+ "mean_seq_length": 1963.3360752056403,
1199
+ "min_seq_length": 1928,
1200
+ "max_seq_length": 2002,
1201
+ "max_ctx_length": 2528,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 25.0
1205
+ },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1881.2492537313433,
1214
+ "min_seq_length": 1860,
1215
+ "max_seq_length": 1976,
1216
+ "max_ctx_length": 2528,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
  },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=JJhooww/Mistral_Relora_Step2k,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
  },
1243
+ "git_hash": "0e4d6ae"
1244
  }
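That closes the corrected raw results file. The metric at the heart of this commit is f1_macro, the unweighted mean of the per-class F1 scores computed over the labels produced by the filters above. This diff contains only stored results, not the leaderboard's aggregation code that the commit actually patches, so the following is just a minimal sketch of the metric, using scikit-learn as a stand-in and made-up labels for a Sim/Não task.

# Minimal sketch of macro-averaged F1; sklearn stands in for the leaderboard's
# own aggregation code, which is not shown in this diff.
from sklearn.metrics import accuracy_score, f1_score

# Hypothetical gold labels and filtered model outputs for a "Sim"/"Nao" binary task.
y_true = ["Sim", "Nao", "Nao", "Sim", "Nao"]
y_pred = ["Sim", "Nao", "Sim", "Sim", "Nao"]

print(accuracy_score(y_true, y_pred))             # fraction of exact matches
print(f1_score(y_true, y_pred, average="macro"))  # unweighted mean of per-class F1

Consistent with a fix confined to the F1 aggregation, the acc,all values in the file above are untouched and, of the scores visible in this diff, only tweetsentbr's f1_macro,all changes (0.5167597069914197 to 0.6890129426552263, at the same acc of 0.6950248756218905). The summarized results files below are updated accordingly.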
JJhooww/Mistral_Relora_Step2k/results_2024-03-09T08-42-21.029909.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.6441765668293375,
38
- "all_grouped_npm": 0.4715334429973541,
39
  "all_grouped": {
40
  "enem_challenge": 0.615815255423373,
41
  "bluex": 0.5257301808066759,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.6526577185427341,
46
  "hatebr_offensive": 0.8133973664850924,
47
  "portuguese_hate_speech": 0.6536416538696902,
48
- "tweetsentbr": 0.5193585604823832
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.615815255423373,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.6526577185427341,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8133973664850924,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6536416538696902,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.5193585604823832
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.615815255423373,
@@ -150,9 +150,9 @@
150
  "main_score": 0.6536416538696902
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.5193585604823832,
154
  "acc,all": 0.7074626865671642,
155
- "main_score": 0.5193585604823832
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.6634120690694257,
38
+ "all_grouped_npm": 0.5001577022831998,
39
  "all_grouped": {
40
  "enem_challenge": 0.615815255423373,
41
  "bluex": 0.5257301808066759,
 
45
  "faquad_nli": 0.6526577185427341,
46
  "hatebr_offensive": 0.8133973664850924,
47
  "portuguese_hate_speech": 0.6536416538696902,
48
+ "tweetsentbr": 0.6924780806431777
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.615815255423373,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.6526577185427341,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8133973664850924,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6536416538696902,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.6924780806431777
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.615815255423373,
 
150
  "main_score": 0.6536416538696902
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.6924780806431777,
154
  "acc,all": 0.7074626865671642,
155
+ "main_score": 0.6924780806431777
156
  }
157
  },
158
  "config_tasks": {
JJhooww/Mistral_Relora_Step2k/results_2024-04-20T03-09-26.234801.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.6421865592536856,
38
- "all_grouped_npm": 0.46863982903930285,
39
  "all_grouped": {
40
  "enem_challenge": 0.6179146256123164,
41
  "bluex": 0.5159944367176634,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.6466313961043266,
46
  "hatebr_offensive": 0.8143254279726638,
47
  "portuguese_hate_speech": 0.652940879778074,
48
- "tweetsentbr": 0.5167597069914197
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6179146256123164,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.6466313961043266,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8143254279726638,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.652940879778074,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.5167597069914197
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6179146256123164,
@@ -150,9 +150,9 @@
150
  "main_score": 0.652940879778074
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.5167597069914197,
154
  "acc,all": 0.6950248756218905,
155
- "main_score": 0.5167597069914197
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.6613258076607753,
38
+ "all_grouped_npm": 0.4971208534546147,
39
  "all_grouped": {
40
  "enem_challenge": 0.6179146256123164,
41
  "bluex": 0.5159944367176634,
 
45
  "faquad_nli": 0.6466313961043266,
46
  "hatebr_offensive": 0.8143254279726638,
47
  "portuguese_hate_speech": 0.652940879778074,
48
+ "tweetsentbr": 0.6890129426552263
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6179146256123164,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.6466313961043266,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8143254279726638,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.652940879778074,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.6890129426552263
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6179146256123164,
 
150
  "main_score": 0.652940879778074
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.6890129426552263,
154
  "acc,all": 0.6950248756218905,
155
+ "main_score": 0.6890129426552263
156
  }
157
  },
158
  "config_tasks": {
Kquant03/CognitiveFusion2-4x7B-BF16/raw_2024-05-19T01-32-18.922295/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.9199303114955995,
5
- "acc,all": 0.9199346405228758,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.7779218286784337,
10
- "mse,all": 0.4303513071895425,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.5438108484005564,
15
- "acc,exam_id__UNICAMP_2018": 0.5185185185185185,
16
- "acc,exam_id__USP_2023": 0.5909090909090909,
17
- "acc,exam_id__USP_2022": 0.4897959183673469,
18
- "acc,exam_id__USP_2019": 0.425,
19
- "acc,exam_id__UNICAMP_2019": 0.56,
20
- "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652,
21
- "acc,exam_id__UNICAMP_2023": 0.6046511627906976,
22
- "acc,exam_id__USP_2018": 0.48148148148148145,
23
- "acc,exam_id__UNICAMP_2022": 0.5897435897435898,
24
- "acc,exam_id__USP_2020": 0.5178571428571429,
25
- "acc,exam_id__USP_2024": 0.7317073170731707,
26
- "acc,exam_id__UNICAMP_2024": 0.5111111111111111,
27
- "acc,exam_id__UNICAMP_2020": 0.6,
28
- "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373,
29
- "acc,exam_id__USP_2021": 0.4807692307692308,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.6389083275017495,
35
- "acc,exam_id__2009": 0.6521739130434783,
36
- "acc,exam_id__2016_2": 0.6016260162601627,
37
- "acc,exam_id__2013": 0.6944444444444444,
38
- "acc,exam_id__2016": 0.5867768595041323,
39
- "acc,exam_id__2015": 0.6218487394957983,
40
- "acc,exam_id__2011": 0.6666666666666666,
41
- "acc,exam_id__2014": 0.6146788990825688,
42
- "acc,exam_id__2017": 0.6637931034482759,
43
- "acc,exam_id__2010": 0.6837606837606838,
44
- "acc,exam_id__2022": 0.6165413533834586,
45
- "acc,exam_id__2023": 0.6444444444444445,
46
- "acc,exam_id__2012": 0.6293103448275862
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.7730900759529709,
50
- "acc,all": 0.8415384615384616,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.8170741905271608,
56
- "acc,all": 0.8214285714285714
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.4145785876993166,
60
- "acc,exam_id__2012-08": 0.4125,
61
- "acc,exam_id__2015-18": 0.4125,
62
- "acc,exam_id__2013-10": 0.4125,
63
- "acc,exam_id__2013-12": 0.4625,
64
- "acc,exam_id__2011-03": 0.3333333333333333,
65
- "acc,exam_id__2012-09": 0.33766233766233766,
66
- "acc,exam_id__2012-07": 0.3625,
67
- "acc,exam_id__2016-21": 0.375,
68
- "acc,exam_id__2012-06": 0.5,
69
- "acc,exam_id__2013-11": 0.45,
70
- "acc,exam_id__2016-19": 0.5,
71
- "acc,exam_id__2012-06a": 0.3625,
72
- "acc,exam_id__2014-14": 0.5125,
73
- "acc,exam_id__2017-22": 0.5125,
74
- "acc,exam_id__2018-25": 0.45,
75
- "acc,exam_id__2014-13": 0.35,
76
- "acc,exam_id__2017-23": 0.425,
77
- "acc,exam_id__2017-24": 0.3875,
78
- "acc,exam_id__2011-04": 0.4,
79
- "acc,exam_id__2010-02": 0.42,
80
- "acc,exam_id__2015-17": 0.5,
81
- "acc,exam_id__2011-05": 0.45,
82
- "acc,exam_id__2014-15": 0.44871794871794873,
83
- "acc,exam_id__2016-20": 0.3625,
84
- "acc,exam_id__2010-01": 0.36470588235294116,
85
- "acc,exam_id__2016-20a": 0.3375,
86
- "acc,exam_id__2015-16": 0.375,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.7076245180055771,
92
- "acc,all": 0.7579318448883666
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.49451740077068485,
96
- "acc,all": 0.7069651741293532,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
208
  },
209
- {
210
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7f7d743b82c0>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7f7d74383c40>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
 
 
414
  },
415
- {
416
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f7d74383ec0>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7f7d743b84a0>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
 
 
503
  },
504
- {
505
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f7d743b8720>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
624
  },
625
- {
626
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7f7d74383600>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
 
 
 
839
  },
840
- {
841
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7f7d74383880>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
963
  },
964
- {
965
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
  },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
- },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
  },
1028
- {
1029
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1030
  }
1031
- ]
1032
  }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
- }
1038
- }
1039
- },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
1050
- },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
1061
- },
1062
- "model_meta": {
1063
- "truncated": 1,
1064
- "non_truncated": 14149,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 1,
1068
- "has_chat_template": false,
1069
- "chat_type": null,
1070
- "n_gpus": 1,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "db45b86c462bb93db7ba4f2c3fe3517582c859a1",
1073
- "model_dtype": "torch.bfloat16",
1074
- "model_memory_footprint": 48844259328,
1075
- "model_num_parameters": 24153690112,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 16,
1081
- "max_length": 2560,
1082
- "max_ctx_length": 2528,
1083
- "max_gen_toks": 32
1084
- },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1369.7455065359477,
1094
- "min_seq_length": 1346,
1095
- "max_seq_length": 1436,
1096
- "max_ctx_length": 2528,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1593.7455065359477,
1109
- "min_seq_length": 1570,
1110
- "max_seq_length": 1660,
1111
- "max_ctx_length": 2528,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
  },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 0,
1119
- "non_truncated": 719,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 0,
1123
- "mean_seq_length": 1719.9262865090404,
1124
- "min_seq_length": 1343,
1125
- "max_seq_length": 2520,
1126
- "max_ctx_length": 2528,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 3.0
1130
  },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 1,
1134
- "non_truncated": 1428,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 1,
1138
- "mean_seq_length": 1620.039188243527,
1139
- "min_seq_length": 1354,
1140
- "max_seq_length": 2618,
1141
- "max_ctx_length": 2528,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 2.9993002099370187
1145
  },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1594.9876923076922,
1154
- "min_seq_length": 1539,
1155
- "max_seq_length": 1715,
1156
- "max_ctx_length": 2528,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
 
 
 
 
 
 
 
 
1160
  },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1305.3878571428572,
1169
- "min_seq_length": 1282,
1170
- "max_seq_length": 1556,
1171
- "max_ctx_length": 2528,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 0,
1179
- "non_truncated": 2195,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 0,
1183
- "mean_seq_length": 1365.764464692483,
1184
- "min_seq_length": 1099,
1185
- "max_seq_length": 1868,
1186
- "max_ctx_length": 2528,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 3.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1190
  },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 0,
1194
- "non_truncated": 851,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 0,
1198
- "mean_seq_length": 1806.3360752056403,
1199
- "min_seq_length": 1771,
1200
- "max_seq_length": 1845,
1201
- "max_ctx_length": 2528,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 25.0
 
 
 
 
 
 
1205
  },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1552.2492537313433,
1214
- "min_seq_length": 1531,
1215
- "max_seq_length": 1647,
1216
- "max_ctx_length": 2528,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
- },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=Kquant03/CognitiveFusion2-4x7B-BF16,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
- },
1243
- "git_hash": "51e0e5e"
1244
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.9199303114955995,
5
+ "acc,all": 0.9199346405228758,
6
+ "alias": "assin2_rte"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.7779218286784337,
10
+ "mse,all": 0.4303513071895425,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.5438108484005564,
15
+ "acc,exam_id__UNICAMP_2018": 0.5185185185185185,
16
+ "acc,exam_id__USP_2023": 0.5909090909090909,
17
+ "acc,exam_id__USP_2022": 0.4897959183673469,
18
+ "acc,exam_id__USP_2019": 0.425,
19
+ "acc,exam_id__UNICAMP_2019": 0.56,
20
+ "acc,exam_id__UNICAMP_2021_1": 0.5434782608695652,
21
+ "acc,exam_id__UNICAMP_2023": 0.6046511627906976,
22
+ "acc,exam_id__USP_2018": 0.48148148148148145,
23
+ "acc,exam_id__UNICAMP_2022": 0.5897435897435898,
24
+ "acc,exam_id__USP_2020": 0.5178571428571429,
25
+ "acc,exam_id__USP_2024": 0.7317073170731707,
26
+ "acc,exam_id__UNICAMP_2024": 0.5111111111111111,
27
+ "acc,exam_id__UNICAMP_2020": 0.6,
28
+ "acc,exam_id__UNICAMP_2021_2": 0.5490196078431373,
29
+ "acc,exam_id__USP_2021": 0.4807692307692308,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.6389083275017495,
35
+ "acc,exam_id__2009": 0.6521739130434783,
36
+ "acc,exam_id__2016_2": 0.6016260162601627,
37
+ "acc,exam_id__2013": 0.6944444444444444,
38
+ "acc,exam_id__2016": 0.5867768595041323,
39
+ "acc,exam_id__2015": 0.6218487394957983,
40
+ "acc,exam_id__2011": 0.6666666666666666,
41
+ "acc,exam_id__2014": 0.6146788990825688,
42
+ "acc,exam_id__2017": 0.6637931034482759,
43
+ "acc,exam_id__2010": 0.6837606837606838,
44
+ "acc,exam_id__2022": 0.6165413533834586,
45
+ "acc,exam_id__2023": 0.6444444444444445,
46
+ "acc,exam_id__2012": 0.6293103448275862
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.7730900759529709,
50
+ "acc,all": 0.8415384615384616,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.8170741905271608,
56
+ "acc,all": 0.8214285714285714
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.4145785876993166,
60
+ "acc,exam_id__2012-08": 0.4125,
61
+ "acc,exam_id__2015-18": 0.4125,
62
+ "acc,exam_id__2013-10": 0.4125,
63
+ "acc,exam_id__2013-12": 0.4625,
64
+ "acc,exam_id__2011-03": 0.3333333333333333,
65
+ "acc,exam_id__2012-09": 0.33766233766233766,
66
+ "acc,exam_id__2012-07": 0.3625,
67
+ "acc,exam_id__2016-21": 0.375,
68
+ "acc,exam_id__2012-06": 0.5,
69
+ "acc,exam_id__2013-11": 0.45,
70
+ "acc,exam_id__2016-19": 0.5,
71
+ "acc,exam_id__2012-06a": 0.3625,
72
+ "acc,exam_id__2014-14": 0.5125,
73
+ "acc,exam_id__2017-22": 0.5125,
74
+ "acc,exam_id__2018-25": 0.45,
75
+ "acc,exam_id__2014-13": 0.35,
76
+ "acc,exam_id__2017-23": 0.425,
77
+ "acc,exam_id__2017-24": 0.3875,
78
+ "acc,exam_id__2011-04": 0.4,
79
+ "acc,exam_id__2010-02": 0.42,
80
+ "acc,exam_id__2015-17": 0.5,
81
+ "acc,exam_id__2011-05": 0.45,
82
+ "acc,exam_id__2014-15": 0.44871794871794873,
83
+ "acc,exam_id__2016-20": 0.3625,
84
+ "acc,exam_id__2010-01": 0.36470588235294116,
85
+ "acc,exam_id__2016-20a": 0.3375,
86
+ "acc,exam_id__2015-16": 0.375,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.7076245180055771,
92
+ "acc,all": 0.7579318448883666
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.6593565343609131,
96
+ "acc,all": 0.7069651741293532,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7f7d743b82c0>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7f7d74383c40>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f7d74383ec0>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7f7d743b84a0>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7f7d743b8720>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
518
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7f7d74383600>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7f7d74383880>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
990
  },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
+ },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
+ },
1028
+ {
1029
+ "function": "take_first"
1030
+ }
1031
+ ]
1032
+ }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
  }
 
1038
  }
1039
  },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
 
 
 
 
1050
  },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
 
 
 
 
1061
  },
1062
+ "model_meta": {
1063
+ "truncated": 1,
1064
+ "non_truncated": 14149,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 1,
1068
+ "has_chat_template": false,
1069
+ "chat_type": null,
1070
+ "n_gpus": 1,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "db45b86c462bb93db7ba4f2c3fe3517582c859a1",
1073
+ "model_dtype": "torch.bfloat16",
1074
+ "model_memory_footprint": 48844259328,
1075
+ "model_num_parameters": 24153690112,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 16,
1081
+ "max_length": 2560,
1082
+ "max_ctx_length": 2528,
1083
+ "max_gen_toks": 32
1084
  },
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1369.7455065359477,
1094
+ "min_seq_length": 1346,
1095
+ "max_seq_length": 1436,
1096
+ "max_ctx_length": 2528,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
+ },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1593.7455065359477,
1109
+ "min_seq_length": 1570,
1110
+ "max_seq_length": 1660,
1111
+ "max_ctx_length": 2528,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
+ },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 0,
1119
+ "non_truncated": 719,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 0,
1123
+ "mean_seq_length": 1719.9262865090404,
1124
+ "min_seq_length": 1343,
1125
+ "max_seq_length": 2520,
1126
+ "max_ctx_length": 2528,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 3.0
1130
+ },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 1,
1134
+ "non_truncated": 1428,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 1,
1138
+ "mean_seq_length": 1620.039188243527,
1139
+ "min_seq_length": 1354,
1140
+ "max_seq_length": 2618,
1141
+ "max_ctx_length": 2528,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 2.9993002099370187
1145
+ },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1594.9876923076922,
1154
+ "min_seq_length": 1539,
1155
+ "max_seq_length": 1715,
1156
+ "max_ctx_length": 2528,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1305.3878571428572,
1169
+ "min_seq_length": 1282,
1170
+ "max_seq_length": 1556,
1171
+ "max_ctx_length": 2528,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 0,
1179
+ "non_truncated": 2195,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 0,
1183
+ "mean_seq_length": 1365.764464692483,
1184
+ "min_seq_length": 1099,
1185
+ "max_seq_length": 1868,
1186
+ "max_ctx_length": 2528,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 3.0
1190
+ },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 0,
1194
+ "non_truncated": 851,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 0,
1198
+ "mean_seq_length": 1806.3360752056403,
1199
+ "min_seq_length": 1771,
1200
+ "max_seq_length": 1845,
1201
+ "max_ctx_length": 2528,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 25.0
1205
+ },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1552.2492537313433,
1214
+ "min_seq_length": 1531,
1215
+ "max_seq_length": 1647,
1216
+ "max_ctx_length": 2528,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
  },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=Kquant03/CognitiveFusion2-4x7B-BF16,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
  },
1243
+ "git_hash": "51e0e5e"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1244
  }
Kquant03/CognitiveFusion2-4x7B-BF16/results_2024-05-19T01-32-18.922295.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.67638400989245,
38
- "all_grouped_npm": 0.5223190853843742,
39
  "all_grouped": {
40
  "enem_challenge": 0.6389083275017495,
41
  "bluex": 0.5438108484005564,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.7730900759529709,
46
  "hatebr_offensive": 0.8170741905271608,
47
  "portuguese_hate_speech": 0.7076245180055771,
48
- "tweetsentbr": 0.49451740077068485
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6389083275017495,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7730900759529709,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8170741905271608,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7076245180055771,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.49451740077068485
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6389083275017495,
@@ -150,9 +150,9 @@
150
  "main_score": 0.7076245180055771
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.49451740077068485,
154
  "acc,all": 0.7069651741293532,
155
- "main_score": 0.49451740077068485
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.6946994691802532,
38
+ "all_grouped_npm": 0.5495742331340812,
39
  "all_grouped": {
40
  "enem_challenge": 0.6389083275017495,
41
  "bluex": 0.5438108484005564,
 
45
  "faquad_nli": 0.7730900759529709,
46
  "hatebr_offensive": 0.8170741905271608,
47
  "portuguese_hate_speech": 0.7076245180055771,
48
+ "tweetsentbr": 0.6593565343609131
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6389083275017495,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7730900759529709,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8170741905271608,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.7076245180055771,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.6593565343609131
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6389083275017495,
 
150
  "main_score": 0.7076245180055771
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.6593565343609131,
154
  "acc,all": 0.7069651741293532,
155
+ "main_score": 0.6593565343609131
156
  }
157
  },
158
  "config_tasks": {
Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5/raw_2024-07-28T01-32-22.165106/results.json CHANGED
@@ -1,1244 +1,1244 @@
1
  {
2
- "results": {
3
- "assin2_rte": {
4
- "f1_macro,all": 0.9207426934587088,
5
- "acc,all": 0.920751633986928,
6
- "alias": "assin2_rte"
7
- },
8
- "assin2_sts": {
9
- "pearson,all": 0.7806469957644414,
10
- "mse,all": 0.43004901960784325,
11
- "alias": "assin2_sts"
12
- },
13
- "bluex": {
14
- "acc,all": 0.5271210013908206,
15
- "acc,exam_id__USP_2019": 0.45,
16
- "acc,exam_id__USP_2018": 0.3888888888888889,
17
- "acc,exam_id__USP_2020": 0.5714285714285714,
18
- "acc,exam_id__USP_2022": 0.4897959183673469,
19
- "acc,exam_id__USP_2023": 0.5909090909090909,
20
- "acc,exam_id__UNICAMP_2019": 0.54,
21
- "acc,exam_id__UNICAMP_2020": 0.5454545454545454,
22
- "acc,exam_id__UNICAMP_2023": 0.5581395348837209,
23
- "acc,exam_id__UNICAMP_2021_1": 0.5,
24
- "acc,exam_id__UNICAMP_2022": 0.6153846153846154,
25
- "acc,exam_id__UNICAMP_2024": 0.5777777777777777,
26
- "acc,exam_id__UNICAMP_2018": 0.4444444444444444,
27
- "acc,exam_id__USP_2024": 0.7560975609756098,
28
- "acc,exam_id__USP_2021": 0.4807692307692308,
29
- "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764,
30
- "alias": "bluex"
31
- },
32
- "enem_challenge": {
33
- "alias": "enem",
34
- "acc,all": 0.6431070678796361,
35
- "acc,exam_id__2016": 0.5702479338842975,
36
- "acc,exam_id__2013": 0.6666666666666666,
37
- "acc,exam_id__2012": 0.6379310344827587,
38
- "acc,exam_id__2023": 0.6814814814814815,
39
- "acc,exam_id__2016_2": 0.6504065040650406,
40
- "acc,exam_id__2010": 0.7094017094017094,
41
- "acc,exam_id__2009": 0.6173913043478261,
42
- "acc,exam_id__2011": 0.7008547008547008,
43
- "acc,exam_id__2015": 0.5966386554621849,
44
- "acc,exam_id__2014": 0.5871559633027523,
45
- "acc,exam_id__2017": 0.6810344827586207,
46
- "acc,exam_id__2022": 0.6165413533834586
47
- },
48
- "faquad_nli": {
49
- "f1_macro,all": 0.7641446815289443,
50
- "acc,all": 0.8107692307692308,
51
- "alias": "faquad_nli"
52
- },
53
- "hatebr_offensive": {
54
- "alias": "hatebr_offensive_binary",
55
- "f1_macro,all": 0.8412698412698412,
56
- "acc,all": 0.8428571428571429
57
- },
58
- "oab_exams": {
59
- "acc,all": 0.41594533029612757,
60
- "acc,exam_id__2017-24": 0.3375,
61
- "acc,exam_id__2011-05": 0.45,
62
- "acc,exam_id__2015-16": 0.35,
63
- "acc,exam_id__2016-20a": 0.3625,
64
- "acc,exam_id__2011-03": 0.32323232323232326,
65
- "acc,exam_id__2012-09": 0.38961038961038963,
66
- "acc,exam_id__2012-06": 0.5,
67
- "acc,exam_id__2012-06a": 0.375,
68
- "acc,exam_id__2013-11": 0.45,
69
- "acc,exam_id__2013-10": 0.4,
70
- "acc,exam_id__2018-25": 0.4875,
71
- "acc,exam_id__2014-13": 0.3125,
72
- "acc,exam_id__2014-14": 0.5125,
73
- "acc,exam_id__2012-07": 0.3625,
74
- "acc,exam_id__2017-23": 0.4625,
75
- "acc,exam_id__2016-20": 0.3625,
76
- "acc,exam_id__2016-21": 0.3625,
77
- "acc,exam_id__2011-04": 0.4125,
78
- "acc,exam_id__2015-18": 0.4375,
79
- "acc,exam_id__2016-19": 0.48717948717948717,
80
- "acc,exam_id__2014-15": 0.46153846153846156,
81
- "acc,exam_id__2017-22": 0.55,
82
- "acc,exam_id__2012-08": 0.425,
83
- "acc,exam_id__2015-17": 0.5,
84
- "acc,exam_id__2010-01": 0.35294117647058826,
85
- "acc,exam_id__2013-12": 0.4,
86
- "acc,exam_id__2010-02": 0.43,
87
- "alias": "oab_exams"
88
- },
89
- "portuguese_hate_speech": {
90
- "alias": "portuguese_hate_speech_binary",
91
- "f1_macro,all": 0.6731605919879914,
92
- "acc,all": 0.6921269095182139
93
- },
94
- "tweetsentbr": {
95
- "f1_macro,all": 0.49723454010215834,
96
- "acc,all": 0.7059701492537314,
97
- "alias": "tweetsentbr"
98
- }
99
- },
100
- "configs": {
101
- "assin2_rte": {
102
- "task": "assin2_rte",
103
- "group": [
104
- "pt_benchmark",
105
- "assin2"
106
- ],
107
- "dataset_path": "assin2",
108
- "test_split": "test",
109
- "fewshot_split": "train",
110
- "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
- "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
- "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
- "target_delimiter": " ",
114
- "fewshot_delimiter": "\n\n",
115
- "fewshot_config": {
116
- "sampler": "id_sampler",
117
- "sampler_config": {
118
- "id_list": [
119
- 1,
120
- 3251,
121
- 2,
122
- 3252,
123
- 3,
124
- 4,
125
- 5,
126
- 6,
127
- 3253,
128
- 7,
129
- 3254,
130
- 3255,
131
- 3256,
132
- 8,
133
- 9,
134
- 10,
135
- 3257,
136
- 11,
137
- 3258,
138
- 12,
139
- 13,
140
- 14,
141
- 15,
142
- 3259,
143
- 3260,
144
- 3261,
145
- 3262,
146
- 3263,
147
- 16,
148
- 17,
149
- 3264,
150
- 18,
151
- 3265,
152
- 3266,
153
- 3267,
154
- 19,
155
- 20,
156
- 3268,
157
- 3269,
158
- 21,
159
- 3270,
160
- 3271,
161
- 22,
162
- 3272,
163
- 3273,
164
- 23,
165
- 3274,
166
- 24,
167
- 25,
168
- 3275
169
- ],
170
- "id_column": "sentence_pair_id"
171
- }
172
- },
173
- "num_fewshot": 15,
174
- "metric_list": [
175
- {
176
- "metric": "f1_macro",
177
- "aggregation": "f1_macro",
178
- "higher_is_better": true
179
  },
180
- {
181
- "metric": "acc",
182
- "aggregation": "acc",
183
- "higher_is_better": true
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
184
  }
185
- ],
186
- "output_type": "generate_until",
187
- "generation_kwargs": {
188
- "max_gen_toks": 32,
189
- "do_sample": false,
190
- "temperature": 0.0,
191
- "top_k": null,
192
- "top_p": null,
193
- "until": [
194
- "\n\n"
195
- ]
196
- },
197
- "repeats": 1,
198
- "filter_list": [
199
- {
200
- "name": "all",
201
- "filter": [
202
- {
203
- "function": "find_similar_label",
204
- "labels": [
205
- "Sim",
206
- "Não"
207
- ]
208
  },
209
- {
210
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
211
  }
212
- ]
213
- }
214
- ],
215
- "should_decontaminate": false,
216
- "metadata": {
217
- "version": 1.1
218
- }
219
- },
220
- "assin2_sts": {
221
- "task": "assin2_sts",
222
- "group": [
223
- "pt_benchmark",
224
- "assin2"
225
- ],
226
- "dataset_path": "assin2",
227
- "test_split": "test",
228
- "fewshot_split": "train",
229
- "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
- "doc_to_target": "<function assin2_float_to_pt_str at 0x7fe8c9bd68e0>",
231
- "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
- "target_delimiter": " ",
233
- "fewshot_delimiter": "\n\n",
234
- "fewshot_config": {
235
- "sampler": "id_sampler",
236
- "sampler_config": {
237
- "id_list": [
238
- 1,
239
- 3251,
240
- 2,
241
- 3252,
242
- 3,
243
- 4,
244
- 5,
245
- 6,
246
- 3253,
247
- 7,
248
- 3254,
249
- 3255,
250
- 3256,
251
- 8,
252
- 9,
253
- 10,
254
- 3257,
255
- 11,
256
- 3258,
257
- 12,
258
- 13,
259
- 14,
260
- 15,
261
- 3259,
262
- 3260,
263
- 3261,
264
- 3262,
265
- 3263,
266
- 16,
267
- 17,
268
- 3264,
269
- 18,
270
- 3265,
271
- 3266,
272
- 3267,
273
- 19,
274
- 20,
275
- 3268,
276
- 3269,
277
- 21,
278
- 3270,
279
- 3271,
280
- 22,
281
- 3272,
282
- 3273,
283
- 23,
284
- 3274,
285
- 24,
286
- 25,
287
- 3275
288
- ],
289
- "id_column": "sentence_pair_id"
290
- }
291
- },
292
- "num_fewshot": 15,
293
- "metric_list": [
294
- {
295
- "metric": "pearson",
296
- "aggregation": "pearsonr",
297
- "higher_is_better": true
298
  },
299
- {
300
- "metric": "mse",
301
- "aggregation": "mean_squared_error",
302
- "higher_is_better": false
303
- }
304
- ],
305
- "output_type": "generate_until",
306
- "generation_kwargs": {
307
- "max_gen_toks": 32,
308
- "do_sample": false,
309
- "temperature": 0.0,
310
- "top_k": null,
311
- "top_p": null,
312
- "until": [
313
- "\n\n"
314
- ]
315
- },
316
- "repeats": 1,
317
- "filter_list": [
318
- {
319
- "name": "all",
320
- "filter": [
321
- {
322
- "function": "number_filter",
323
- "type": "float",
324
- "range_min": 1.0,
325
- "range_max": 5.0,
326
- "on_outside_range": "clip",
327
- "fallback": 5.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
328
  },
329
- {
330
- "function": "take_first"
331
- }
332
- ]
333
- }
334
- ],
335
- "should_decontaminate": false,
336
- "metadata": {
337
- "version": 1.1
338
- }
339
- },
340
- "bluex": {
341
- "task": "bluex",
342
- "group": [
343
- "pt_benchmark",
344
- "vestibular"
345
- ],
346
- "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
- "test_split": "train",
348
- "fewshot_split": "train",
349
- "doc_to_text": "<function enem_doc_to_text at 0x7fe8c9bd62a0>",
350
- "doc_to_target": "{{answerKey}}",
351
- "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
- "target_delimiter": " ",
353
- "fewshot_delimiter": "\n\n",
354
- "fewshot_config": {
355
- "sampler": "id_sampler",
356
- "sampler_config": {
357
- "id_list": [
358
- "USP_2018_3",
359
- "UNICAMP_2018_2",
360
- "USP_2018_35",
361
- "UNICAMP_2018_16",
362
- "USP_2018_89"
363
- ],
364
- "id_column": "id",
365
- "exclude_from_task": true
366
- }
367
- },
368
- "num_fewshot": 3,
369
- "metric_list": [
370
- {
371
- "metric": "acc",
372
- "aggregation": "acc",
373
- "higher_is_better": true
374
- }
375
- ],
376
- "output_type": "generate_until",
377
- "generation_kwargs": {
378
- "max_gen_toks": 32,
379
- "do_sample": false,
380
- "temperature": 0.0,
381
- "top_k": null,
382
- "top_p": null,
383
- "until": [
384
- "\n\n"
385
- ]
386
- },
387
- "repeats": 1,
388
- "filter_list": [
389
- {
390
- "name": "all",
391
- "filter": [
392
- {
393
- "function": "normalize_spaces"
394
  },
395
- {
396
- "function": "remove_accents"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
397
  },
398
- {
399
- "function": "find_choices",
400
- "choices": [
401
- "A",
402
- "B",
403
- "C",
404
- "D",
405
- "E"
406
- ],
407
- "regex_patterns": [
408
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
- "\\b([ABCDE])\\.",
410
- "\\b([ABCDE]) ?[.):-]",
411
- "\\b([ABCDE])$",
412
- "\\b([ABCDE])\\b"
413
- ]
 
 
414
  },
415
- {
416
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
417
  }
418
- ],
419
- "group_by": {
420
- "column": "exam_id"
421
- }
422
- }
423
- ],
424
- "should_decontaminate": true,
425
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fe8c9bd6520>",
426
- "metadata": {
427
- "version": 1.1
428
- }
429
- },
430
- "enem_challenge": {
431
- "task": "enem_challenge",
432
- "task_alias": "enem",
433
- "group": [
434
- "pt_benchmark",
435
- "vestibular"
436
- ],
437
- "dataset_path": "eduagarcia/enem_challenge",
438
- "test_split": "train",
439
- "fewshot_split": "train",
440
- "doc_to_text": "<function enem_doc_to_text at 0x7fe8c9bd6ac0>",
441
- "doc_to_target": "{{answerKey}}",
442
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
- "target_delimiter": " ",
444
- "fewshot_delimiter": "\n\n",
445
- "fewshot_config": {
446
- "sampler": "id_sampler",
447
- "sampler_config": {
448
- "id_list": [
449
- "2022_21",
450
- "2022_88",
451
- "2022_143"
452
- ],
453
- "id_column": "id",
454
- "exclude_from_task": true
455
- }
456
- },
457
- "num_fewshot": 3,
458
- "metric_list": [
459
- {
460
- "metric": "acc",
461
- "aggregation": "acc",
462
- "higher_is_better": true
463
- }
464
- ],
465
- "output_type": "generate_until",
466
- "generation_kwargs": {
467
- "max_gen_toks": 32,
468
- "do_sample": false,
469
- "temperature": 0.0,
470
- "top_k": null,
471
- "top_p": null,
472
- "until": [
473
- "\n\n"
474
- ]
475
- },
476
- "repeats": 1,
477
- "filter_list": [
478
- {
479
- "name": "all",
480
- "filter": [
481
- {
482
- "function": "normalize_spaces"
483
- },
484
- {
485
- "function": "remove_accents"
486
  },
487
- {
488
- "function": "find_choices",
489
- "choices": [
490
- "A",
491
- "B",
492
- "C",
493
- "D",
494
- "E"
495
- ],
496
- "regex_patterns": [
497
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
- "\\b([ABCDE])\\.",
499
- "\\b([ABCDE]) ?[.):-]",
500
- "\\b([ABCDE])$",
501
- "\\b([ABCDE])\\b"
502
- ]
 
 
503
  },
504
- {
505
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
506
  }
507
- ],
508
- "group_by": {
509
- "column": "exam_id"
510
- }
511
- }
512
- ],
513
- "should_decontaminate": true,
514
- "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fe8c9bd6d40>",
515
- "metadata": {
516
- "version": 1.1
517
- }
518
- },
519
- "faquad_nli": {
520
- "task": "faquad_nli",
521
- "group": [
522
- "pt_benchmark"
523
- ],
524
- "dataset_path": "ruanchaves/faquad-nli",
525
- "test_split": "test",
526
- "fewshot_split": "train",
527
- "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
- "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
- "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
- "target_delimiter": " ",
531
- "fewshot_delimiter": "\n\n",
532
- "fewshot_config": {
533
- "sampler": "first_n",
534
- "sampler_config": {
535
- "fewshot_indices": [
536
- 1893,
537
- 949,
538
- 663,
539
- 105,
540
- 1169,
541
- 2910,
542
- 2227,
543
- 2813,
544
- 974,
545
- 558,
546
- 1503,
547
- 1958,
548
- 2918,
549
- 601,
550
- 1560,
551
- 984,
552
- 2388,
553
- 995,
554
- 2233,
555
- 1982,
556
- 165,
557
- 2788,
558
- 1312,
559
- 2285,
560
- 522,
561
- 1113,
562
- 1670,
563
- 323,
564
- 236,
565
- 1263,
566
- 1562,
567
- 2519,
568
- 1049,
569
- 432,
570
- 1167,
571
- 1394,
572
- 2022,
573
- 2551,
574
- 2194,
575
- 2187,
576
- 2282,
577
- 2816,
578
- 108,
579
- 301,
580
- 1185,
581
- 1315,
582
- 1420,
583
- 2436,
584
- 2322,
585
- 766
586
- ]
587
- }
588
- },
589
- "num_fewshot": 15,
590
- "metric_list": [
591
- {
592
- "metric": "f1_macro",
593
- "aggregation": "f1_macro",
594
- "higher_is_better": true
595
  },
596
- {
597
- "metric": "acc",
598
- "aggregation": "acc",
599
- "higher_is_better": true
600
- }
601
- ],
602
- "output_type": "generate_until",
603
- "generation_kwargs": {
604
- "max_gen_toks": 32,
605
- "do_sample": false,
606
- "temperature": 0.0,
607
- "top_k": null,
608
- "top_p": null,
609
- "until": [
610
- "\n\n"
611
- ]
612
- },
613
- "repeats": 1,
614
- "filter_list": [
615
- {
616
- "name": "all",
617
- "filter": [
618
- {
619
- "function": "find_similar_label",
620
- "labels": [
621
- "Sim",
622
- "Não"
623
- ]
624
  },
625
- {
626
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
627
  }
628
- ]
629
- }
630
- ],
631
- "should_decontaminate": false,
632
- "metadata": {
633
- "version": 1.1
634
- }
635
- },
636
- "hatebr_offensive": {
637
- "task": "hatebr_offensive",
638
- "task_alias": "hatebr_offensive_binary",
639
- "group": [
640
- "pt_benchmark"
641
- ],
642
- "dataset_path": "eduagarcia/portuguese_benchmark",
643
- "dataset_name": "HateBR_offensive_binary",
644
- "test_split": "test",
645
- "fewshot_split": "train",
646
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
- "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
- "target_delimiter": " ",
650
- "fewshot_delimiter": "\n\n",
651
- "fewshot_config": {
652
- "sampler": "id_sampler",
653
- "sampler_config": {
654
- "id_list": [
655
- 48,
656
- 44,
657
- 36,
658
- 20,
659
- 3511,
660
- 88,
661
- 3555,
662
- 16,
663
- 56,
664
- 3535,
665
- 60,
666
- 40,
667
- 3527,
668
- 4,
669
- 76,
670
- 3579,
671
- 3523,
672
- 3551,
673
- 68,
674
- 3503,
675
- 84,
676
- 3539,
677
- 64,
678
- 3599,
679
- 80,
680
- 3563,
681
- 3559,
682
- 3543,
683
- 3547,
684
- 3587,
685
- 3595,
686
- 3575,
687
- 3567,
688
- 3591,
689
- 24,
690
- 96,
691
- 92,
692
- 3507,
693
- 52,
694
- 72,
695
- 8,
696
- 3571,
697
- 3515,
698
- 3519,
699
- 3531,
700
- 28,
701
- 32,
702
- 0,
703
- 12,
704
- 3583
705
- ],
706
- "id_column": "idx"
707
- }
708
- },
709
- "num_fewshot": 25,
710
- "metric_list": [
711
- {
712
- "metric": "f1_macro",
713
- "aggregation": "f1_macro",
714
- "higher_is_better": true
715
  },
716
- {
717
- "metric": "acc",
718
- "aggregation": "acc",
719
- "higher_is_better": true
720
- }
721
- ],
722
- "output_type": "generate_until",
723
- "generation_kwargs": {
724
- "max_gen_toks": 32,
725
- "do_sample": false,
726
- "temperature": 0.0,
727
- "top_k": null,
728
- "top_p": null,
729
- "until": [
730
- "\n\n"
731
- ]
732
- },
733
- "repeats": 1,
734
- "filter_list": [
735
- {
736
- "name": "all",
737
- "filter": [
738
- {
739
- "function": "find_similar_label",
740
- "labels": [
741
- "Sim",
742
- "Não"
743
- ]
744
  },
745
- {
746
- "function": "take_first"
747
- }
748
- ]
749
- }
750
- ],
751
- "should_decontaminate": false,
752
- "metadata": {
753
- "version": 1.0
754
- }
755
- },
756
- "oab_exams": {
757
- "task": "oab_exams",
758
- "group": [
759
- "legal_benchmark",
760
- "pt_benchmark"
761
- ],
762
- "dataset_path": "eduagarcia/oab_exams",
763
- "test_split": "train",
764
- "fewshot_split": "train",
765
- "doc_to_text": "<function doc_to_text at 0x7fe8c9bd5c60>",
766
- "doc_to_target": "{{answerKey}}",
767
- "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
- "target_delimiter": " ",
769
- "fewshot_delimiter": "\n\n",
770
- "fewshot_config": {
771
- "sampler": "id_sampler",
772
- "sampler_config": {
773
- "id_list": [
774
- "2010-01_1",
775
- "2010-01_11",
776
- "2010-01_13",
777
- "2010-01_23",
778
- "2010-01_26",
779
- "2010-01_28",
780
- "2010-01_38",
781
- "2010-01_48",
782
- "2010-01_58",
783
- "2010-01_68",
784
- "2010-01_76",
785
- "2010-01_83",
786
- "2010-01_85",
787
- "2010-01_91",
788
- "2010-01_99"
789
- ],
790
- "id_column": "id",
791
- "exclude_from_task": true
792
- }
793
- },
794
- "num_fewshot": 3,
795
- "metric_list": [
796
- {
797
- "metric": "acc",
798
- "aggregation": "acc",
799
- "higher_is_better": true
800
- }
801
- ],
802
- "output_type": "generate_until",
803
- "generation_kwargs": {
804
- "max_gen_toks": 32,
805
- "do_sample": false,
806
- "temperature": 0.0,
807
- "top_k": null,
808
- "top_p": null,
809
- "until": [
810
- "\n\n"
811
- ]
812
- },
813
- "repeats": 1,
814
- "filter_list": [
815
- {
816
- "name": "all",
817
- "filter": [
818
- {
819
- "function": "normalize_spaces"
820
  },
821
- {
822
- "function": "remove_accents"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
823
  },
824
- {
825
- "function": "find_choices",
826
- "choices": [
827
- "A",
828
- "B",
829
- "C",
830
- "D"
831
- ],
832
- "regex_patterns": [
833
- "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
- "\\b([ABCD])\\.",
835
- "\\b([ABCD]) ?[.):-]",
836
- "\\b([ABCD])$",
837
- "\\b([ABCD])\\b"
838
- ]
 
 
 
839
  },
840
- {
841
- "function": "take_first"
 
842
  }
843
- ],
844
- "group_by": {
845
- "column": "exam_id"
846
- }
847
- }
848
- ],
849
- "should_decontaminate": true,
850
- "doc_to_decontamination_query": "<function doc_to_text at 0x7fe8c9bd5ee0>",
851
- "metadata": {
852
- "version": 1.5
853
- }
854
- },
855
- "portuguese_hate_speech": {
856
- "task": "portuguese_hate_speech",
857
- "task_alias": "portuguese_hate_speech_binary",
858
- "group": [
859
- "pt_benchmark"
860
- ],
861
- "dataset_path": "eduagarcia/portuguese_benchmark",
862
- "dataset_name": "Portuguese_Hate_Speech_binary",
863
- "test_split": "test",
864
- "fewshot_split": "train",
865
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
- "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
- "target_delimiter": " ",
869
- "fewshot_delimiter": "\n\n",
870
- "fewshot_config": {
871
- "sampler": "id_sampler",
872
- "sampler_config": {
873
- "id_list": [
874
- 52,
875
- 50,
876
- 39,
877
- 28,
878
- 3,
879
- 105,
880
- 22,
881
- 25,
882
- 60,
883
- 11,
884
- 66,
885
- 41,
886
- 9,
887
- 4,
888
- 91,
889
- 42,
890
- 7,
891
- 20,
892
- 76,
893
- 1,
894
- 104,
895
- 13,
896
- 67,
897
- 54,
898
- 97,
899
- 27,
900
- 24,
901
- 14,
902
- 16,
903
- 48,
904
- 53,
905
- 40,
906
- 34,
907
- 49,
908
- 32,
909
- 119,
910
- 114,
911
- 2,
912
- 58,
913
- 83,
914
- 18,
915
- 36,
916
- 5,
917
- 6,
918
- 10,
919
- 35,
920
- 38,
921
- 0,
922
- 21,
923
- 46
924
- ],
925
- "id_column": "idx"
926
- }
927
- },
928
- "num_fewshot": 25,
929
- "metric_list": [
930
- {
931
- "metric": "f1_macro",
932
- "aggregation": "f1_macro",
933
- "higher_is_better": true
934
  },
935
- {
936
- "metric": "acc",
937
- "aggregation": "acc",
938
- "higher_is_better": true
939
- }
940
- ],
941
- "output_type": "generate_until",
942
- "generation_kwargs": {
943
- "max_gen_toks": 32,
944
- "do_sample": false,
945
- "temperature": 0.0,
946
- "top_k": null,
947
- "top_p": null,
948
- "until": [
949
- "\n\n"
950
- ]
951
- },
952
- "repeats": 1,
953
- "filter_list": [
954
- {
955
- "name": "all",
956
- "filter": [
957
- {
958
- "function": "find_similar_label",
959
- "labels": [
960
- "Sim",
961
- "Não"
962
- ]
963
  },
964
- {
965
- "function": "take_first"
 
966
  }
967
- ]
968
- }
969
- ],
970
- "should_decontaminate": false,
971
- "metadata": {
972
- "version": 1.0
973
- }
974
- },
975
- "tweetsentbr": {
976
- "task": "tweetsentbr",
977
- "group": [
978
- "pt_benchmark"
979
- ],
980
- "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
- "test_split": "test",
982
- "fewshot_split": "train",
983
- "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
- "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
- "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
- "target_delimiter": " ",
987
- "fewshot_delimiter": "\n\n",
988
- "fewshot_config": {
989
- "sampler": "first_n"
990
- },
991
- "num_fewshot": 25,
992
- "metric_list": [
993
- {
994
- "metric": "f1_macro",
995
- "aggregation": "f1_macro",
996
- "higher_is_better": true
997
  },
998
- {
999
- "metric": "acc",
1000
- "aggregation": "acc",
1001
- "higher_is_better": true
1002
- }
1003
- ],
1004
- "output_type": "generate_until",
1005
- "generation_kwargs": {
1006
- "max_gen_toks": 32,
1007
- "do_sample": false,
1008
- "temperature": 0.0,
1009
- "top_k": null,
1010
- "top_p": null,
1011
- "until": [
1012
- "\n\n"
1013
- ]
1014
- },
1015
- "repeats": 1,
1016
- "filter_list": [
1017
- {
1018
- "name": "all",
1019
- "filter": [
1020
- {
1021
- "function": "find_similar_label",
1022
- "labels": [
1023
- "Positivo",
1024
- "Neutro",
1025
- "Negativo"
1026
- ]
1027
  },
1028
- {
1029
- "function": "take_first"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1030
  }
1031
- ]
1032
  }
1033
- ],
1034
- "should_decontaminate": false,
1035
- "metadata": {
1036
- "version": 1.0
1037
- }
1038
- }
1039
- },
1040
- "versions": {
1041
- "assin2_rte": 1.1,
1042
- "assin2_sts": 1.1,
1043
- "bluex": 1.1,
1044
- "enem_challenge": 1.1,
1045
- "faquad_nli": 1.1,
1046
- "hatebr_offensive": 1.0,
1047
- "oab_exams": 1.5,
1048
- "portuguese_hate_speech": 1.0,
1049
- "tweetsentbr": 1.0
1050
- },
1051
- "n-shot": {
1052
- "assin2_rte": 15,
1053
- "assin2_sts": 15,
1054
- "bluex": 3,
1055
- "enem_challenge": 3,
1056
- "faquad_nli": 15,
1057
- "hatebr_offensive": 25,
1058
- "oab_exams": 3,
1059
- "portuguese_hate_speech": 25,
1060
- "tweetsentbr": 25
1061
- },
1062
- "model_meta": {
1063
- "truncated": 1,
1064
- "non_truncated": 14149,
1065
- "padded": 0,
1066
- "non_padded": 14150,
1067
- "fewshots_truncated": 1,
1068
- "has_chat_template": false,
1069
- "chat_type": null,
1070
- "n_gpus": 1,
1071
- "accelerate_num_process": null,
1072
- "model_sha": "43ea8d27d652dc15e4d27f665c5d636a5937780b",
1073
- "model_dtype": "torch.float16",
1074
- "model_memory_footprint": 14483472384,
1075
- "model_num_parameters": 7241732096,
1076
- "model_is_loaded_in_4bit": null,
1077
- "model_is_loaded_in_8bit": null,
1078
- "model_is_quantized": null,
1079
- "model_device": "cuda:0",
1080
- "batch_size": 32,
1081
- "max_length": 2560,
1082
- "max_ctx_length": 2528,
1083
- "max_gen_toks": 32
1084
- },
1085
- "task_model_meta": {
1086
- "assin2_rte": {
1087
- "sample_size": 2448,
1088
- "truncated": 0,
1089
- "non_truncated": 2448,
1090
- "padded": 0,
1091
- "non_padded": 2448,
1092
- "fewshots_truncated": 0,
1093
- "mean_seq_length": 1369.7455065359477,
1094
- "min_seq_length": 1346,
1095
- "max_seq_length": 1436,
1096
- "max_ctx_length": 2528,
1097
- "max_gen_toks": 32,
1098
- "mean_original_fewshots_size": 15.0,
1099
- "mean_effective_fewshot_size": 15.0
1100
- },
1101
- "assin2_sts": {
1102
- "sample_size": 2448,
1103
- "truncated": 0,
1104
- "non_truncated": 2448,
1105
- "padded": 0,
1106
- "non_padded": 2448,
1107
- "fewshots_truncated": 0,
1108
- "mean_seq_length": 1593.7455065359477,
1109
- "min_seq_length": 1570,
1110
- "max_seq_length": 1660,
1111
- "max_ctx_length": 2528,
1112
- "max_gen_toks": 32,
1113
- "mean_original_fewshots_size": 15.0,
1114
- "mean_effective_fewshot_size": 15.0
1115
  },
1116
- "bluex": {
1117
- "sample_size": 719,
1118
- "truncated": 0,
1119
- "non_truncated": 719,
1120
- "padded": 0,
1121
- "non_padded": 719,
1122
- "fewshots_truncated": 0,
1123
- "mean_seq_length": 1719.9262865090404,
1124
- "min_seq_length": 1343,
1125
- "max_seq_length": 2520,
1126
- "max_ctx_length": 2528,
1127
- "max_gen_toks": 32,
1128
- "mean_original_fewshots_size": 3.0,
1129
- "mean_effective_fewshot_size": 3.0
1130
  },
1131
- "enem_challenge": {
1132
- "sample_size": 1429,
1133
- "truncated": 1,
1134
- "non_truncated": 1428,
1135
- "padded": 0,
1136
- "non_padded": 1429,
1137
- "fewshots_truncated": 1,
1138
- "mean_seq_length": 1620.039188243527,
1139
- "min_seq_length": 1354,
1140
- "max_seq_length": 2618,
1141
- "max_ctx_length": 2528,
1142
- "max_gen_toks": 32,
1143
- "mean_original_fewshots_size": 3.0,
1144
- "mean_effective_fewshot_size": 2.9993002099370187
1145
  },
1146
- "faquad_nli": {
1147
- "sample_size": 650,
1148
- "truncated": 0,
1149
- "non_truncated": 650,
1150
- "padded": 0,
1151
- "non_padded": 650,
1152
- "fewshots_truncated": 0,
1153
- "mean_seq_length": 1594.9876923076922,
1154
- "min_seq_length": 1539,
1155
- "max_seq_length": 1715,
1156
- "max_ctx_length": 2528,
1157
- "max_gen_toks": 32,
1158
- "mean_original_fewshots_size": 15.0,
1159
- "mean_effective_fewshot_size": 15.0
 
 
 
 
 
 
 
 
1160
  },
1161
- "hatebr_offensive": {
1162
- "sample_size": 1400,
1163
- "truncated": 0,
1164
- "non_truncated": 1400,
1165
- "padded": 0,
1166
- "non_padded": 1400,
1167
- "fewshots_truncated": 0,
1168
- "mean_seq_length": 1305.3878571428572,
1169
- "min_seq_length": 1282,
1170
- "max_seq_length": 1556,
1171
- "max_ctx_length": 2528,
1172
- "max_gen_toks": 32,
1173
- "mean_original_fewshots_size": 25.0,
1174
- "mean_effective_fewshot_size": 25.0
1175
- },
1176
- "oab_exams": {
1177
- "sample_size": 2195,
1178
- "truncated": 0,
1179
- "non_truncated": 2195,
1180
- "padded": 0,
1181
- "non_padded": 2195,
1182
- "fewshots_truncated": 0,
1183
- "mean_seq_length": 1365.764464692483,
1184
- "min_seq_length": 1099,
1185
- "max_seq_length": 1868,
1186
- "max_ctx_length": 2528,
1187
- "max_gen_toks": 32,
1188
- "mean_original_fewshots_size": 3.0,
1189
- "mean_effective_fewshot_size": 3.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1190
  },
1191
- "portuguese_hate_speech": {
1192
- "sample_size": 851,
1193
- "truncated": 0,
1194
- "non_truncated": 851,
1195
- "padded": 0,
1196
- "non_padded": 851,
1197
- "fewshots_truncated": 0,
1198
- "mean_seq_length": 1806.3360752056403,
1199
- "min_seq_length": 1771,
1200
- "max_seq_length": 1845,
1201
- "max_ctx_length": 2528,
1202
- "max_gen_toks": 32,
1203
- "mean_original_fewshots_size": 25.0,
1204
- "mean_effective_fewshot_size": 25.0
 
 
 
 
 
 
1205
  },
1206
- "tweetsentbr": {
1207
- "sample_size": 2010,
1208
- "truncated": 0,
1209
- "non_truncated": 2010,
1210
- "padded": 0,
1211
- "non_padded": 2010,
1212
- "fewshots_truncated": 0,
1213
- "mean_seq_length": 1552.2492537313433,
1214
- "min_seq_length": 1531,
1215
- "max_seq_length": 1647,
1216
- "max_ctx_length": 2528,
1217
- "max_gen_toks": 32,
1218
- "mean_original_fewshots_size": 25.0,
1219
- "mean_effective_fewshot_size": 25.0
1220
- }
1221
- },
1222
- "config": {
1223
- "model": "huggingface",
1224
- "model_args": "pretrained=Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
- "batch_size": "auto",
1226
- "batch_sizes": [],
1227
- "device": null,
1228
- "use_cache": null,
1229
- "limit": [
1230
- null,
1231
- null,
1232
- null,
1233
- null,
1234
- null,
1235
- null,
1236
- null,
1237
- null,
1238
- null
1239
- ],
1240
- "bootstrap_iters": 0,
1241
- "gen_kwargs": null
1242
- },
1243
- "git_hash": "5a13f3e"
1244
  }
 
1
  {
2
+ "results": {
3
+ "assin2_rte": {
4
+ "f1_macro,all": 0.9207426934587088,
5
+ "acc,all": 0.920751633986928,
6
+ "alias": "assin2_rte"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  },
8
+ "assin2_sts": {
9
+ "pearson,all": 0.7806469957644414,
10
+ "mse,all": 0.43004901960784325,
11
+ "alias": "assin2_sts"
12
+ },
13
+ "bluex": {
14
+ "acc,all": 0.5271210013908206,
15
+ "acc,exam_id__USP_2019": 0.45,
16
+ "acc,exam_id__USP_2018": 0.3888888888888889,
17
+ "acc,exam_id__USP_2020": 0.5714285714285714,
18
+ "acc,exam_id__USP_2022": 0.4897959183673469,
19
+ "acc,exam_id__USP_2023": 0.5909090909090909,
20
+ "acc,exam_id__UNICAMP_2019": 0.54,
21
+ "acc,exam_id__UNICAMP_2020": 0.5454545454545454,
22
+ "acc,exam_id__UNICAMP_2023": 0.5581395348837209,
23
+ "acc,exam_id__UNICAMP_2021_1": 0.5,
24
+ "acc,exam_id__UNICAMP_2022": 0.6153846153846154,
25
+ "acc,exam_id__UNICAMP_2024": 0.5777777777777777,
26
+ "acc,exam_id__UNICAMP_2018": 0.4444444444444444,
27
+ "acc,exam_id__USP_2024": 0.7560975609756098,
28
+ "acc,exam_id__USP_2021": 0.4807692307692308,
29
+ "acc,exam_id__UNICAMP_2021_2": 0.47058823529411764,
30
+ "alias": "bluex"
31
+ },
32
+ "enem_challenge": {
33
+ "alias": "enem",
34
+ "acc,all": 0.6431070678796361,
35
+ "acc,exam_id__2016": 0.5702479338842975,
36
+ "acc,exam_id__2013": 0.6666666666666666,
37
+ "acc,exam_id__2012": 0.6379310344827587,
38
+ "acc,exam_id__2023": 0.6814814814814815,
39
+ "acc,exam_id__2016_2": 0.6504065040650406,
40
+ "acc,exam_id__2010": 0.7094017094017094,
41
+ "acc,exam_id__2009": 0.6173913043478261,
42
+ "acc,exam_id__2011": 0.7008547008547008,
43
+ "acc,exam_id__2015": 0.5966386554621849,
44
+ "acc,exam_id__2014": 0.5871559633027523,
45
+ "acc,exam_id__2017": 0.6810344827586207,
46
+ "acc,exam_id__2022": 0.6165413533834586
47
+ },
48
+ "faquad_nli": {
49
+ "f1_macro,all": 0.7641446815289443,
50
+ "acc,all": 0.8107692307692308,
51
+ "alias": "faquad_nli"
52
+ },
53
+ "hatebr_offensive": {
54
+ "alias": "hatebr_offensive_binary",
55
+ "f1_macro,all": 0.8412698412698412,
56
+ "acc,all": 0.8428571428571429
57
+ },
58
+ "oab_exams": {
59
+ "acc,all": 0.41594533029612757,
60
+ "acc,exam_id__2017-24": 0.3375,
61
+ "acc,exam_id__2011-05": 0.45,
62
+ "acc,exam_id__2015-16": 0.35,
63
+ "acc,exam_id__2016-20a": 0.3625,
64
+ "acc,exam_id__2011-03": 0.32323232323232326,
65
+ "acc,exam_id__2012-09": 0.38961038961038963,
66
+ "acc,exam_id__2012-06": 0.5,
67
+ "acc,exam_id__2012-06a": 0.375,
68
+ "acc,exam_id__2013-11": 0.45,
69
+ "acc,exam_id__2013-10": 0.4,
70
+ "acc,exam_id__2018-25": 0.4875,
71
+ "acc,exam_id__2014-13": 0.3125,
72
+ "acc,exam_id__2014-14": 0.5125,
73
+ "acc,exam_id__2012-07": 0.3625,
74
+ "acc,exam_id__2017-23": 0.4625,
75
+ "acc,exam_id__2016-20": 0.3625,
76
+ "acc,exam_id__2016-21": 0.3625,
77
+ "acc,exam_id__2011-04": 0.4125,
78
+ "acc,exam_id__2015-18": 0.4375,
79
+ "acc,exam_id__2016-19": 0.48717948717948717,
80
+ "acc,exam_id__2014-15": 0.46153846153846156,
81
+ "acc,exam_id__2017-22": 0.55,
82
+ "acc,exam_id__2012-08": 0.425,
83
+ "acc,exam_id__2015-17": 0.5,
84
+ "acc,exam_id__2010-01": 0.35294117647058826,
85
+ "acc,exam_id__2013-12": 0.4,
86
+ "acc,exam_id__2010-02": 0.43,
87
+ "alias": "oab_exams"
88
+ },
89
+ "portuguese_hate_speech": {
90
+ "alias": "portuguese_hate_speech_binary",
91
+ "f1_macro,all": 0.6731605919879914,
92
+ "acc,all": 0.6921269095182139
93
+ },
94
+ "tweetsentbr": {
95
+ "f1_macro,all": 0.6629793868028777,
96
+ "acc,all": 0.7059701492537314,
97
+ "alias": "tweetsentbr"
98
  }
99
+ },
100
+ "configs": {
101
+ "assin2_rte": {
102
+ "task": "assin2_rte",
103
+ "group": [
104
+ "pt_benchmark",
105
+ "assin2"
106
+ ],
107
+ "dataset_path": "assin2",
108
+ "test_split": "test",
109
+ "fewshot_split": "train",
110
+ "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
111
+ "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
112
+ "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
113
+ "target_delimiter": " ",
114
+ "fewshot_delimiter": "\n\n",
115
+ "fewshot_config": {
116
+ "sampler": "id_sampler",
117
+ "sampler_config": {
118
+ "id_list": [
119
+ 1,
120
+ 3251,
121
+ 2,
122
+ 3252,
123
+ 3,
124
+ 4,
125
+ 5,
126
+ 6,
127
+ 3253,
128
+ 7,
129
+ 3254,
130
+ 3255,
131
+ 3256,
132
+ 8,
133
+ 9,
134
+ 10,
135
+ 3257,
136
+ 11,
137
+ 3258,
138
+ 12,
139
+ 13,
140
+ 14,
141
+ 15,
142
+ 3259,
143
+ 3260,
144
+ 3261,
145
+ 3262,
146
+ 3263,
147
+ 16,
148
+ 17,
149
+ 3264,
150
+ 18,
151
+ 3265,
152
+ 3266,
153
+ 3267,
154
+ 19,
155
+ 20,
156
+ 3268,
157
+ 3269,
158
+ 21,
159
+ 3270,
160
+ 3271,
161
+ 22,
162
+ 3272,
163
+ 3273,
164
+ 23,
165
+ 3274,
166
+ 24,
167
+ 25,
168
+ 3275
169
+ ],
170
+ "id_column": "sentence_pair_id"
171
+ }
172
+ },
173
+ "num_fewshot": 15,
174
+ "metric_list": [
175
+ {
176
+ "metric": "f1_macro",
177
+ "aggregation": "f1_macro",
178
+ "higher_is_better": true
179
+ },
180
+ {
181
+ "metric": "acc",
182
+ "aggregation": "acc",
183
+ "higher_is_better": true
184
+ }
185
+ ],
186
+ "output_type": "generate_until",
187
+ "generation_kwargs": {
188
+ "max_gen_toks": 32,
189
+ "do_sample": false,
190
+ "temperature": 0.0,
191
+ "top_k": null,
192
+ "top_p": null,
193
+ "until": [
194
+ "\n\n"
195
+ ]
196
  },
197
+ "repeats": 1,
198
+ "filter_list": [
199
+ {
200
+ "name": "all",
201
+ "filter": [
202
+ {
203
+ "function": "find_similar_label",
204
+ "labels": [
205
+ "Sim",
206
+ "Não"
207
+ ]
208
+ },
209
+ {
210
+ "function": "take_first"
211
+ }
212
+ ]
213
+ }
214
+ ],
215
+ "should_decontaminate": false,
216
+ "metadata": {
217
+ "version": 1.1
218
  }
219
  },
220
+ "assin2_sts": {
221
+ "task": "assin2_sts",
222
+ "group": [
223
+ "pt_benchmark",
224
+ "assin2"
225
+ ],
226
+ "dataset_path": "assin2",
227
+ "test_split": "test",
228
+ "fewshot_split": "train",
229
+ "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
230
+ "doc_to_target": "<function assin2_float_to_pt_str at 0x7fe8c9bd68e0>",
231
+ "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
232
+ "target_delimiter": " ",
233
+ "fewshot_delimiter": "\n\n",
234
+ "fewshot_config": {
235
+ "sampler": "id_sampler",
236
+ "sampler_config": {
237
+ "id_list": [
238
+ 1,
239
+ 3251,
240
+ 2,
241
+ 3252,
242
+ 3,
243
+ 4,
244
+ 5,
245
+ 6,
246
+ 3253,
247
+ 7,
248
+ 3254,
249
+ 3255,
250
+ 3256,
251
+ 8,
252
+ 9,
253
+ 10,
254
+ 3257,
255
+ 11,
256
+ 3258,
257
+ 12,
258
+ 13,
259
+ 14,
260
+ 15,
261
+ 3259,
262
+ 3260,
263
+ 3261,
264
+ 3262,
265
+ 3263,
266
+ 16,
267
+ 17,
268
+ 3264,
269
+ 18,
270
+ 3265,
271
+ 3266,
272
+ 3267,
273
+ 19,
274
+ 20,
275
+ 3268,
276
+ 3269,
277
+ 21,
278
+ 3270,
279
+ 3271,
280
+ 22,
281
+ 3272,
282
+ 3273,
283
+ 23,
284
+ 3274,
285
+ 24,
286
+ 25,
287
+ 3275
288
+ ],
289
+ "id_column": "sentence_pair_id"
290
+ }
291
  },
292
+ "num_fewshot": 15,
293
+ "metric_list": [
294
+ {
295
+ "metric": "pearson",
296
+ "aggregation": "pearsonr",
297
+ "higher_is_better": true
298
+ },
299
+ {
300
+ "metric": "mse",
301
+ "aggregation": "mean_squared_error",
302
+ "higher_is_better": false
303
+ }
304
+ ],
305
+ "output_type": "generate_until",
306
+ "generation_kwargs": {
307
+ "max_gen_toks": 32,
308
+ "do_sample": false,
309
+ "temperature": 0.0,
310
+ "top_k": null,
311
+ "top_p": null,
312
+ "until": [
313
+ "\n\n"
314
+ ]
315
  },
316
+ "repeats": 1,
317
+ "filter_list": [
318
+ {
319
+ "name": "all",
320
+ "filter": [
321
+ {
322
+ "function": "number_filter",
323
+ "type": "float",
324
+ "range_min": 1.0,
325
+ "range_max": 5.0,
326
+ "on_outside_range": "clip",
327
+ "fallback": 5.0
328
+ },
329
+ {
330
+ "function": "take_first"
331
+ }
332
+ ]
333
+ }
334
+ ],
335
+ "should_decontaminate": false,
336
+ "metadata": {
337
+ "version": 1.1
338
+ }
339
+ },
340
+ "bluex": {
341
+ "task": "bluex",
342
+ "group": [
343
+ "pt_benchmark",
344
+ "vestibular"
345
+ ],
346
+ "dataset_path": "eduagarcia-temp/BLUEX_without_images",
347
+ "test_split": "train",
348
+ "fewshot_split": "train",
349
+ "doc_to_text": "<function enem_doc_to_text at 0x7fe8c9bd62a0>",
350
+ "doc_to_target": "{{answerKey}}",
351
+ "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
352
+ "target_delimiter": " ",
353
+ "fewshot_delimiter": "\n\n",
354
+ "fewshot_config": {
355
+ "sampler": "id_sampler",
356
+ "sampler_config": {
357
+ "id_list": [
358
+ "USP_2018_3",
359
+ "UNICAMP_2018_2",
360
+ "USP_2018_35",
361
+ "UNICAMP_2018_16",
362
+ "USP_2018_89"
363
+ ],
364
+ "id_column": "id",
365
+ "exclude_from_task": true
366
+ }
367
  },
368
+ "num_fewshot": 3,
369
+ "metric_list": [
370
+ {
371
+ "metric": "acc",
372
+ "aggregation": "acc",
373
+ "higher_is_better": true
374
+ }
375
+ ],
376
+ "output_type": "generate_until",
377
+ "generation_kwargs": {
378
+ "max_gen_toks": 32,
379
+ "do_sample": false,
380
+ "temperature": 0.0,
381
+ "top_k": null,
382
+ "top_p": null,
383
+ "until": [
384
+ "\n\n"
385
+ ]
386
  },
387
+ "repeats": 1,
388
+ "filter_list": [
389
+ {
390
+ "name": "all",
391
+ "filter": [
392
+ {
393
+ "function": "normalize_spaces"
394
+ },
395
+ {
396
+ "function": "remove_accents"
397
+ },
398
+ {
399
+ "function": "find_choices",
400
+ "choices": [
401
+ "A",
402
+ "B",
403
+ "C",
404
+ "D",
405
+ "E"
406
+ ],
407
+ "regex_patterns": [
408
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
409
+ "\\b([ABCDE])\\.",
410
+ "\\b([ABCDE]) ?[.):-]",
411
+ "\\b([ABCDE])$",
412
+ "\\b([ABCDE])\\b"
413
+ ]
414
+ },
415
+ {
416
+ "function": "take_first"
417
+ }
418
+ ],
419
+ "group_by": {
420
+ "column": "exam_id"
421
+ }
422
+ }
423
+ ],
424
+ "should_decontaminate": true,
425
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fe8c9bd6520>",
426
+ "metadata": {
427
+ "version": 1.1
428
  }
429
+ },
430
+ "enem_challenge": {
431
+ "task": "enem_challenge",
432
+ "task_alias": "enem",
433
+ "group": [
434
+ "pt_benchmark",
435
+ "vestibular"
436
+ ],
437
+ "dataset_path": "eduagarcia/enem_challenge",
438
+ "test_split": "train",
439
+ "fewshot_split": "train",
440
+ "doc_to_text": "<function enem_doc_to_text at 0x7fe8c9bd6ac0>",
441
+ "doc_to_target": "{{answerKey}}",
442
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
443
+ "target_delimiter": " ",
444
+ "fewshot_delimiter": "\n\n",
445
+ "fewshot_config": {
446
+ "sampler": "id_sampler",
447
+ "sampler_config": {
448
+ "id_list": [
449
+ "2022_21",
450
+ "2022_88",
451
+ "2022_143"
452
+ ],
453
+ "id_column": "id",
454
+ "exclude_from_task": true
455
+ }
456
  },
457
+ "num_fewshot": 3,
458
+ "metric_list": [
459
+ {
460
+ "metric": "acc",
461
+ "aggregation": "acc",
462
+ "higher_is_better": true
463
+ }
464
+ ],
465
+ "output_type": "generate_until",
466
+ "generation_kwargs": {
467
+ "max_gen_toks": 32,
468
+ "do_sample": false,
469
+ "temperature": 0.0,
470
+ "top_k": null,
471
+ "top_p": null,
472
+ "until": [
473
+ "\n\n"
474
+ ]
475
  },
476
+ "repeats": 1,
477
+ "filter_list": [
478
+ {
479
+ "name": "all",
480
+ "filter": [
481
+ {
482
+ "function": "normalize_spaces"
483
+ },
484
+ {
485
+ "function": "remove_accents"
486
+ },
487
+ {
488
+ "function": "find_choices",
489
+ "choices": [
490
+ "A",
491
+ "B",
492
+ "C",
493
+ "D",
494
+ "E"
495
+ ],
496
+ "regex_patterns": [
497
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
498
+ "\\b([ABCDE])\\.",
499
+ "\\b([ABCDE]) ?[.):-]",
500
+ "\\b([ABCDE])$",
501
+ "\\b([ABCDE])\\b"
502
+ ]
503
+ },
504
+ {
505
+ "function": "take_first"
506
+ }
507
+ ],
508
+ "group_by": {
509
+ "column": "exam_id"
510
+ }
511
+ }
512
+ ],
513
+ "should_decontaminate": true,
514
+ "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fe8c9bd6d40>",
515
+ "metadata": {
516
+ "version": 1.1
517
  }
  },
519
+ "faquad_nli": {
520
+ "task": "faquad_nli",
521
+ "group": [
522
+ "pt_benchmark"
523
+ ],
524
+ "dataset_path": "ruanchaves/faquad-nli",
525
+ "test_split": "test",
526
+ "fewshot_split": "train",
527
+ "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
528
+ "doc_to_target": "{{['Não', 'Sim'][label]}}",
529
+ "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
530
+ "target_delimiter": " ",
531
+ "fewshot_delimiter": "\n\n",
532
+ "fewshot_config": {
533
+ "sampler": "first_n",
534
+ "sampler_config": {
535
+ "fewshot_indices": [
536
+ 1893,
537
+ 949,
538
+ 663,
539
+ 105,
540
+ 1169,
541
+ 2910,
542
+ 2227,
543
+ 2813,
544
+ 974,
545
+ 558,
546
+ 1503,
547
+ 1958,
548
+ 2918,
549
+ 601,
550
+ 1560,
551
+ 984,
552
+ 2388,
553
+ 995,
554
+ 2233,
555
+ 1982,
556
+ 165,
557
+ 2788,
558
+ 1312,
559
+ 2285,
560
+ 522,
561
+ 1113,
562
+ 1670,
563
+ 323,
564
+ 236,
565
+ 1263,
566
+ 1562,
567
+ 2519,
568
+ 1049,
569
+ 432,
570
+ 1167,
571
+ 1394,
572
+ 2022,
573
+ 2551,
574
+ 2194,
575
+ 2187,
576
+ 2282,
577
+ 2816,
578
+ 108,
579
+ 301,
580
+ 1185,
581
+ 1315,
582
+ 1420,
583
+ 2436,
584
+ 2322,
585
+ 766
586
+ ]
587
+ }
588
  },
589
+ "num_fewshot": 15,
590
+ "metric_list": [
591
+ {
592
+ "metric": "f1_macro",
593
+ "aggregation": "f1_macro",
594
+ "higher_is_better": true
595
+ },
596
+ {
597
+ "metric": "acc",
598
+ "aggregation": "acc",
599
+ "higher_is_better": true
600
+ }
601
+ ],
602
+ "output_type": "generate_until",
603
+ "generation_kwargs": {
604
+ "max_gen_toks": 32,
605
+ "do_sample": false,
606
+ "temperature": 0.0,
607
+ "top_k": null,
608
+ "top_p": null,
609
+ "until": [
610
+ "\n\n"
611
+ ]
612
+ },
613
+ "repeats": 1,
614
+ "filter_list": [
615
+ {
616
+ "name": "all",
617
+ "filter": [
618
+ {
619
+ "function": "find_similar_label",
620
+ "labels": [
621
+ "Sim",
622
+ "Não"
623
+ ]
624
+ },
625
+ {
626
+ "function": "take_first"
627
+ }
628
+ ]
629
+ }
630
+ ],
631
+ "should_decontaminate": false,
632
+ "metadata": {
633
+ "version": 1.1
634
  }
635
  },
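faquad_nli is scored with macro F1 and accuracy over the labels "Sim" and "Não", and the find_similar_label filter maps whatever the model generated onto the closest of those labels before scoring. The actual matching rules belong to the harness; the sketch below is only an approximation based on case- and accent-insensitive containment.

```python
# Approximate sketch of a find_similar_label-style filter (assumed
# behaviour: accent/case-insensitive match against the allowed labels).
import unicodedata

def _fold(text):
    decomposed = unicodedata.normalize("NFKD", text)
    return "".join(c for c in decomposed if not unicodedata.combining(c)).lower()

def find_similar_label(generation, labels=("Sim", "Não")):
    folded = _fold(generation)
    for label in labels:
        if _fold(label) in folded:
            return label
    return generation  # leave unmatched outputs untouched

print(find_similar_label("Não, a resposta está incompleta."))  # -> "Não"
```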
636
+ "hatebr_offensive": {
637
+ "task": "hatebr_offensive",
638
+ "task_alias": "hatebr_offensive_binary",
639
+ "group": [
640
+ "pt_benchmark"
641
+ ],
642
+ "dataset_path": "eduagarcia/portuguese_benchmark",
643
+ "dataset_name": "HateBR_offensive_binary",
644
+ "test_split": "test",
645
+ "fewshot_split": "train",
646
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
647
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
648
+ "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
649
+ "target_delimiter": " ",
650
+ "fewshot_delimiter": "\n\n",
651
+ "fewshot_config": {
652
+ "sampler": "id_sampler",
653
+ "sampler_config": {
654
+ "id_list": [
655
+ 48,
656
+ 44,
657
+ 36,
658
+ 20,
659
+ 3511,
660
+ 88,
661
+ 3555,
662
+ 16,
663
+ 56,
664
+ 3535,
665
+ 60,
666
+ 40,
667
+ 3527,
668
+ 4,
669
+ 76,
670
+ 3579,
671
+ 3523,
672
+ 3551,
673
+ 68,
674
+ 3503,
675
+ 84,
676
+ 3539,
677
+ 64,
678
+ 3599,
679
+ 80,
680
+ 3563,
681
+ 3559,
682
+ 3543,
683
+ 3547,
684
+ 3587,
685
+ 3595,
686
+ 3575,
687
+ 3567,
688
+ 3591,
689
+ 24,
690
+ 96,
691
+ 92,
692
+ 3507,
693
+ 52,
694
+ 72,
695
+ 8,
696
+ 3571,
697
+ 3515,
698
+ 3519,
699
+ 3531,
700
+ 28,
701
+ 32,
702
+ 0,
703
+ 12,
704
+ 3583
705
+ ],
706
+ "id_column": "idx"
707
+ }
708
  },
709
+ "num_fewshot": 25,
710
+ "metric_list": [
711
+ {
712
+ "metric": "f1_macro",
713
+ "aggregation": "f1_macro",
714
+ "higher_is_better": true
715
+ },
716
+ {
717
+ "metric": "acc",
718
+ "aggregation": "acc",
719
+ "higher_is_better": true
720
+ }
721
+ ],
722
+ "output_type": "generate_until",
723
+ "generation_kwargs": {
724
+ "max_gen_toks": 32,
725
+ "do_sample": false,
726
+ "temperature": 0.0,
727
+ "top_k": null,
728
+ "top_p": null,
729
+ "until": [
730
+ "\n\n"
731
+ ]
732
  },
733
+ "repeats": 1,
734
+ "filter_list": [
735
+ {
736
+ "name": "all",
737
+ "filter": [
738
+ {
739
+ "function": "find_similar_label",
740
+ "labels": [
741
+ "Sim",
742
+ "Não"
743
+ ]
744
+ },
745
+ {
746
+ "function": "take_first"
747
+ }
748
+ ]
749
+ }
750
+ ],
751
+ "should_decontaminate": false,
752
+ "metadata": {
753
+ "version": 1.0
754
+ }
755
+ },
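Every task in this suite decodes greedily (do_sample false, temperature 0.0), generates at most 32 new tokens, and cuts the output at the first blank line (until: ["\n\n"]). The token limit is enforced by the model's generate call; the stop-sequence part is plain post-processing, sketched below for illustration.

```python
# Illustrative stop-sequence handling for the generation_kwargs above.
def truncate_at_stop(generation, until=("\n\n",)):
    # max_gen_toks (32) is enforced during generation; here we only cut
    # the decoded text at the first occurrence of any stop sequence.
    for stop in until:
        idx = generation.find(stop)
        if idx != -1:
            generation = generation[:idx]
    return generation

print(truncate_at_stop("Sim\n\nTexto: outro comentário"))  # -> "Sim"
```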
756
+ "oab_exams": {
757
+ "task": "oab_exams",
758
+ "group": [
759
+ "legal_benchmark",
760
+ "pt_benchmark"
761
+ ],
762
+ "dataset_path": "eduagarcia/oab_exams",
763
+ "test_split": "train",
764
+ "fewshot_split": "train",
765
+ "doc_to_text": "<function doc_to_text at 0x7fe8c9bd5c60>",
766
+ "doc_to_target": "{{answerKey}}",
767
+ "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
768
+ "target_delimiter": " ",
769
+ "fewshot_delimiter": "\n\n",
770
+ "fewshot_config": {
771
+ "sampler": "id_sampler",
772
+ "sampler_config": {
773
+ "id_list": [
774
+ "2010-01_1",
775
+ "2010-01_11",
776
+ "2010-01_13",
777
+ "2010-01_23",
778
+ "2010-01_26",
779
+ "2010-01_28",
780
+ "2010-01_38",
781
+ "2010-01_48",
782
+ "2010-01_58",
783
+ "2010-01_68",
784
+ "2010-01_76",
785
+ "2010-01_83",
786
+ "2010-01_85",
787
+ "2010-01_91",
788
+ "2010-01_99"
789
+ ],
790
+ "id_column": "id",
791
+ "exclude_from_task": true
792
+ }
793
  },
794
+ "num_fewshot": 3,
795
+ "metric_list": [
796
+ {
797
+ "metric": "acc",
798
+ "aggregation": "acc",
799
+ "higher_is_better": true
800
+ }
801
+ ],
802
+ "output_type": "generate_until",
803
+ "generation_kwargs": {
804
+ "max_gen_toks": 32,
805
+ "do_sample": false,
806
+ "temperature": 0.0,
807
+ "top_k": null,
808
+ "top_p": null,
809
+ "until": [
810
+ "\n\n"
811
+ ]
812
  },
813
+ "repeats": 1,
814
+ "filter_list": [
815
+ {
816
+ "name": "all",
817
+ "filter": [
818
+ {
819
+ "function": "normalize_spaces"
820
+ },
821
+ {
822
+ "function": "remove_accents"
823
+ },
824
+ {
825
+ "function": "find_choices",
826
+ "choices": [
827
+ "A",
828
+ "B",
829
+ "C",
830
+ "D"
831
+ ],
832
+ "regex_patterns": [
833
+ "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
834
+ "\\b([ABCD])\\.",
835
+ "\\b([ABCD]) ?[.):-]",
836
+ "\\b([ABCD])$",
837
+ "\\b([ABCD])\\b"
838
+ ]
839
+ },
840
+ {
841
+ "function": "take_first"
842
+ }
843
+ ],
844
+ "group_by": {
845
+ "column": "exam_id"
846
+ }
847
+ }
848
+ ],
849
+ "should_decontaminate": true,
850
+ "doc_to_decontamination_query": "<function doc_to_text at 0x7fe8c9bd5ee0>",
851
+ "metadata": {
852
+ "version": 1.5
853
  }
854
  },
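bluex, enem_challenge and oab_exams all attach group_by: {"column": "exam_id"} to their filter, grouping the filtered predictions by exam edition, presumably so accuracy can also be inspected per exam in the raw results. A rough sketch of such a per-exam breakdown follows; this is assumed behaviour, not the harness's actual aggregation code.

```python
# Assumed per-exam accuracy breakdown for a group_by on "exam_id".
from collections import defaultdict

def accuracy_by_exam(records):
    # records: iterable of dicts with "exam_id", "pred" and "gold" keys
    groups = defaultdict(list)
    for r in records:
        groups[r["exam_id"]].append(r["pred"] == r["gold"])
    return {exam: sum(hits) / len(hits) for exam, hits in groups.items()}

print(accuracy_by_exam([
    {"exam_id": "2010-01", "pred": "A", "gold": "A"},
    {"exam_id": "2010-01", "pred": "B", "gold": "C"},
    {"exam_id": "2011-02", "pred": "D", "gold": "D"},
]))  # -> {'2010-01': 0.5, '2011-02': 1.0}
```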
855
+ "portuguese_hate_speech": {
856
+ "task": "portuguese_hate_speech",
857
+ "task_alias": "portuguese_hate_speech_binary",
858
+ "group": [
859
+ "pt_benchmark"
860
+ ],
861
+ "dataset_path": "eduagarcia/portuguese_benchmark",
862
+ "dataset_name": "Portuguese_Hate_Speech_binary",
863
+ "test_split": "test",
864
+ "fewshot_split": "train",
865
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
866
+ "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
867
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
868
+ "target_delimiter": " ",
869
+ "fewshot_delimiter": "\n\n",
870
+ "fewshot_config": {
871
+ "sampler": "id_sampler",
872
+ "sampler_config": {
873
+ "id_list": [
874
+ 52,
875
+ 50,
876
+ 39,
877
+ 28,
878
+ 3,
879
+ 105,
880
+ 22,
881
+ 25,
882
+ 60,
883
+ 11,
884
+ 66,
885
+ 41,
886
+ 9,
887
+ 4,
888
+ 91,
889
+ 42,
890
+ 7,
891
+ 20,
892
+ 76,
893
+ 1,
894
+ 104,
895
+ 13,
896
+ 67,
897
+ 54,
898
+ 97,
899
+ 27,
900
+ 24,
901
+ 14,
902
+ 16,
903
+ 48,
904
+ 53,
905
+ 40,
906
+ 34,
907
+ 49,
908
+ 32,
909
+ 119,
910
+ 114,
911
+ 2,
912
+ 58,
913
+ 83,
914
+ 18,
915
+ 36,
916
+ 5,
917
+ 6,
918
+ 10,
919
+ 35,
920
+ 38,
921
+ 0,
922
+ 21,
923
+ 46
924
+ ],
925
+ "id_column": "idx"
926
+ }
927
+ },
928
+ "num_fewshot": 25,
929
+ "metric_list": [
930
+ {
931
+ "metric": "f1_macro",
932
+ "aggregation": "f1_macro",
933
+ "higher_is_better": true
934
+ },
935
+ {
936
+ "metric": "acc",
937
+ "aggregation": "acc",
938
+ "higher_is_better": true
939
+ }
940
+ ],
941
+ "output_type": "generate_until",
942
+ "generation_kwargs": {
943
+ "max_gen_toks": 32,
944
+ "do_sample": false,
945
+ "temperature": 0.0,
946
+ "top_k": null,
947
+ "top_p": null,
948
+ "until": [
949
+ "\n\n"
950
+ ]
951
  },
952
+ "repeats": 1,
953
+ "filter_list": [
954
+ {
955
+ "name": "all",
956
+ "filter": [
957
+ {
958
+ "function": "find_similar_label",
959
+ "labels": [
960
+ "Sim",
961
+ "Não"
962
+ ]
963
+ },
964
+ {
965
+ "function": "take_first"
966
+ }
967
+ ]
968
+ }
969
+ ],
970
+ "should_decontaminate": false,
971
+ "metadata": {
972
+ "version": 1.0
973
  }
974
  },
975
+ "tweetsentbr": {
976
+ "task": "tweetsentbr",
977
+ "group": [
978
+ "pt_benchmark"
979
+ ],
980
+ "dataset_path": "eduagarcia/tweetsentbr_fewshot",
981
+ "test_split": "test",
982
+ "fewshot_split": "train",
983
+ "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
984
+ "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
985
+ "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
986
+ "target_delimiter": " ",
987
+ "fewshot_delimiter": "\n\n",
988
+ "fewshot_config": {
989
+ "sampler": "first_n"
990
  },
991
+ "num_fewshot": 25,
992
+ "metric_list": [
993
+ {
994
+ "metric": "f1_macro",
995
+ "aggregation": "f1_macro",
996
+ "higher_is_better": true
997
+ },
998
+ {
999
+ "metric": "acc",
1000
+ "aggregation": "acc",
1001
+ "higher_is_better": true
1002
+ }
1003
+ ],
1004
+ "output_type": "generate_until",
1005
+ "generation_kwargs": {
1006
+ "max_gen_toks": 32,
1007
+ "do_sample": false,
1008
+ "temperature": 0.0,
1009
+ "top_k": null,
1010
+ "top_p": null,
1011
+ "until": [
1012
+ "\n\n"
1013
+ ]
1014
+ },
1015
+ "repeats": 1,
1016
+ "filter_list": [
1017
+ {
1018
+ "name": "all",
1019
+ "filter": [
1020
+ {
1021
+ "function": "find_similar_label",
1022
+ "labels": [
1023
+ "Positivo",
1024
+ "Neutro",
1025
+ "Negativo"
1026
+ ]
1027
+ },
1028
+ {
1029
+ "function": "take_first"
1030
+ }
1031
+ ]
1032
+ }
1033
+ ],
1034
+ "should_decontaminate": false,
1035
+ "metadata": {
1036
+ "version": 1.0
1037
  }
 
1038
  }
1039
  },
1040
+ "versions": {
1041
+ "assin2_rte": 1.1,
1042
+ "assin2_sts": 1.1,
1043
+ "bluex": 1.1,
1044
+ "enem_challenge": 1.1,
1045
+ "faquad_nli": 1.1,
1046
+ "hatebr_offensive": 1.0,
1047
+ "oab_exams": 1.5,
1048
+ "portuguese_hate_speech": 1.0,
1049
+ "tweetsentbr": 1.0
1050
  },
1051
+ "n-shot": {
1052
+ "assin2_rte": 15,
1053
+ "assin2_sts": 15,
1054
+ "bluex": 3,
1055
+ "enem_challenge": 3,
1056
+ "faquad_nli": 15,
1057
+ "hatebr_offensive": 25,
1058
+ "oab_exams": 3,
1059
+ "portuguese_hate_speech": 25,
1060
+ "tweetsentbr": 25
1061
  },
1062
+ "model_meta": {
1063
+ "truncated": 1,
1064
+ "non_truncated": 14149,
1065
+ "padded": 0,
1066
+ "non_padded": 14150,
1067
+ "fewshots_truncated": 1,
1068
+ "has_chat_template": false,
1069
+ "chat_type": null,
1070
+ "n_gpus": 1,
1071
+ "accelerate_num_process": null,
1072
+ "model_sha": "43ea8d27d652dc15e4d27f665c5d636a5937780b",
1073
+ "model_dtype": "torch.float16",
1074
+ "model_memory_footprint": 14483472384,
1075
+ "model_num_parameters": 7241732096,
1076
+ "model_is_loaded_in_4bit": null,
1077
+ "model_is_loaded_in_8bit": null,
1078
+ "model_is_quantized": null,
1079
+ "model_device": "cuda:0",
1080
+ "batch_size": 32,
1081
+ "max_length": 2560,
1082
+ "max_ctx_length": 2528,
1083
+ "max_gen_toks": 32
1084
  },
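A quick consistency check on the model_meta block above: the prompt budget (max_ctx_length) is the model's max_length minus the tokens reserved for generation (max_gen_toks), and prompts that exceed it are truncated, which is why exactly one enem_challenge sample (and its few-shot block) is reported as truncated out of the 14,150 evaluated samples.

```python
# Numbers taken directly from the model_meta block above.
max_length, max_gen_toks = 2560, 32
max_ctx_length = max_length - max_gen_toks
assert max_ctx_length == 2528

truncated, non_truncated = 1, 14149
assert truncated + non_truncated == 14150  # total samples across the 9 tasks
```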
1085
+ "task_model_meta": {
1086
+ "assin2_rte": {
1087
+ "sample_size": 2448,
1088
+ "truncated": 0,
1089
+ "non_truncated": 2448,
1090
+ "padded": 0,
1091
+ "non_padded": 2448,
1092
+ "fewshots_truncated": 0,
1093
+ "mean_seq_length": 1369.7455065359477,
1094
+ "min_seq_length": 1346,
1095
+ "max_seq_length": 1436,
1096
+ "max_ctx_length": 2528,
1097
+ "max_gen_toks": 32,
1098
+ "mean_original_fewshots_size": 15.0,
1099
+ "mean_effective_fewshot_size": 15.0
1100
+ },
1101
+ "assin2_sts": {
1102
+ "sample_size": 2448,
1103
+ "truncated": 0,
1104
+ "non_truncated": 2448,
1105
+ "padded": 0,
1106
+ "non_padded": 2448,
1107
+ "fewshots_truncated": 0,
1108
+ "mean_seq_length": 1593.7455065359477,
1109
+ "min_seq_length": 1570,
1110
+ "max_seq_length": 1660,
1111
+ "max_ctx_length": 2528,
1112
+ "max_gen_toks": 32,
1113
+ "mean_original_fewshots_size": 15.0,
1114
+ "mean_effective_fewshot_size": 15.0
1115
+ },
1116
+ "bluex": {
1117
+ "sample_size": 719,
1118
+ "truncated": 0,
1119
+ "non_truncated": 719,
1120
+ "padded": 0,
1121
+ "non_padded": 719,
1122
+ "fewshots_truncated": 0,
1123
+ "mean_seq_length": 1719.9262865090404,
1124
+ "min_seq_length": 1343,
1125
+ "max_seq_length": 2520,
1126
+ "max_ctx_length": 2528,
1127
+ "max_gen_toks": 32,
1128
+ "mean_original_fewshots_size": 3.0,
1129
+ "mean_effective_fewshot_size": 3.0
1130
+ },
1131
+ "enem_challenge": {
1132
+ "sample_size": 1429,
1133
+ "truncated": 1,
1134
+ "non_truncated": 1428,
1135
+ "padded": 0,
1136
+ "non_padded": 1429,
1137
+ "fewshots_truncated": 1,
1138
+ "mean_seq_length": 1620.039188243527,
1139
+ "min_seq_length": 1354,
1140
+ "max_seq_length": 2618,
1141
+ "max_ctx_length": 2528,
1142
+ "max_gen_toks": 32,
1143
+ "mean_original_fewshots_size": 3.0,
1144
+ "mean_effective_fewshot_size": 2.9993002099370187
1145
+ },
1146
+ "faquad_nli": {
1147
+ "sample_size": 650,
1148
+ "truncated": 0,
1149
+ "non_truncated": 650,
1150
+ "padded": 0,
1151
+ "non_padded": 650,
1152
+ "fewshots_truncated": 0,
1153
+ "mean_seq_length": 1594.9876923076922,
1154
+ "min_seq_length": 1539,
1155
+ "max_seq_length": 1715,
1156
+ "max_ctx_length": 2528,
1157
+ "max_gen_toks": 32,
1158
+ "mean_original_fewshots_size": 15.0,
1159
+ "mean_effective_fewshot_size": 15.0
1160
+ },
1161
+ "hatebr_offensive": {
1162
+ "sample_size": 1400,
1163
+ "truncated": 0,
1164
+ "non_truncated": 1400,
1165
+ "padded": 0,
1166
+ "non_padded": 1400,
1167
+ "fewshots_truncated": 0,
1168
+ "mean_seq_length": 1305.3878571428572,
1169
+ "min_seq_length": 1282,
1170
+ "max_seq_length": 1556,
1171
+ "max_ctx_length": 2528,
1172
+ "max_gen_toks": 32,
1173
+ "mean_original_fewshots_size": 25.0,
1174
+ "mean_effective_fewshot_size": 25.0
1175
+ },
1176
+ "oab_exams": {
1177
+ "sample_size": 2195,
1178
+ "truncated": 0,
1179
+ "non_truncated": 2195,
1180
+ "padded": 0,
1181
+ "non_padded": 2195,
1182
+ "fewshots_truncated": 0,
1183
+ "mean_seq_length": 1365.764464692483,
1184
+ "min_seq_length": 1099,
1185
+ "max_seq_length": 1868,
1186
+ "max_ctx_length": 2528,
1187
+ "max_gen_toks": 32,
1188
+ "mean_original_fewshots_size": 3.0,
1189
+ "mean_effective_fewshot_size": 3.0
1190
+ },
1191
+ "portuguese_hate_speech": {
1192
+ "sample_size": 851,
1193
+ "truncated": 0,
1194
+ "non_truncated": 851,
1195
+ "padded": 0,
1196
+ "non_padded": 851,
1197
+ "fewshots_truncated": 0,
1198
+ "mean_seq_length": 1806.3360752056403,
1199
+ "min_seq_length": 1771,
1200
+ "max_seq_length": 1845,
1201
+ "max_ctx_length": 2528,
1202
+ "max_gen_toks": 32,
1203
+ "mean_original_fewshots_size": 25.0,
1204
+ "mean_effective_fewshot_size": 25.0
1205
+ },
1206
+ "tweetsentbr": {
1207
+ "sample_size": 2010,
1208
+ "truncated": 0,
1209
+ "non_truncated": 2010,
1210
+ "padded": 0,
1211
+ "non_padded": 2010,
1212
+ "fewshots_truncated": 0,
1213
+ "mean_seq_length": 1552.2492537313433,
1214
+ "min_seq_length": 1531,
1215
+ "max_seq_length": 1647,
1216
+ "max_ctx_length": 2528,
1217
+ "max_gen_toks": 32,
1218
+ "mean_original_fewshots_size": 25.0,
1219
+ "mean_effective_fewshot_size": 25.0
1220
+ }
1221
  },
1222
+ "config": {
1223
+ "model": "huggingface",
1224
+ "model_args": "pretrained=Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5,dtype=float16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
1225
+ "batch_size": "auto",
1226
+ "batch_sizes": [],
1227
+ "device": null,
1228
+ "use_cache": null,
1229
+ "limit": [
1230
+ null,
1231
+ null,
1232
+ null,
1233
+ null,
1234
+ null,
1235
+ null,
1236
+ null,
1237
+ null,
1238
+ null
1239
+ ],
1240
+ "bootstrap_iters": 0,
1241
+ "gen_kwargs": null
1242
  },
1243
+ "git_hash": "5a13f3e"
1244
  }
Kukedlc/NeuralExperiment-7b-MagicCoder-v7.5/results_2024-07-28T01-32-22.165106.json CHANGED
@@ -34,8 +34,8 @@
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
- "all_grouped_average": 0.6737080826309633,
38
- "all_grouped_npm": 0.5178442795207305,
39
  "all_grouped": {
40
  "enem_challenge": 0.6431070678796361,
41
  "bluex": 0.5271210013908206,
@@ -45,7 +45,7 @@
45
  "faquad_nli": 0.7641446815289443,
46
  "hatebr_offensive": 0.8412698412698412,
47
  "portuguese_hate_speech": 0.6731605919879914,
48
- "tweetsentbr": 0.49723454010215834
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6431070678796361,
@@ -56,7 +56,7 @@
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7641446815289443,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8412698412698412,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6731605919879914,
59
- "harness|tweetsentbr|tweetsentbr|None|25": 0.49723454010215834
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6431070678796361,
@@ -150,9 +150,9 @@
150
  "main_score": 0.6731605919879914
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
- "f1_macro,all": 0.49723454010215834,
154
  "acc,all": 0.7059701492537314,
155
- "main_score": 0.49723454010215834
156
  }
157
  },
158
  "config_tasks": {
 
34
  "eval_version": "1.1.0"
35
  },
36
  "results": {
37
+ "all_grouped_average": 0.692124176708821,
38
+ "all_grouped_npm": 0.5452491814223044,
39
  "all_grouped": {
40
  "enem_challenge": 0.6431070678796361,
41
  "bluex": 0.5271210013908206,
 
45
  "faquad_nli": 0.7641446815289443,
46
  "hatebr_offensive": 0.8412698412698412,
47
  "portuguese_hate_speech": 0.6731605919879914,
48
+ "tweetsentbr": 0.6629793868028777
49
  },
50
  "all": {
51
  "harness|enem_challenge|enem_challenge|None|3": 0.6431070678796361,
 
56
  "harness|faquad_nli|faquad_nli|None|15": 0.7641446815289443,
57
  "harness|hatebr_offensive|hatebr_offensive|None|25": 0.8412698412698412,
58
  "harness|portuguese_hate_speech|portuguese_hate_speech|None|25": 0.6731605919879914,
59
+ "harness|tweetsentbr|tweetsentbr|None|25": 0.6629793868028777
60
  },
61
  "harness|enem_challenge|enem_challenge|None|3": {
62
  "acc,all": 0.6431070678796361,
 
150
  "main_score": 0.6731605919879914
151
  },
152
  "harness|tweetsentbr|tweetsentbr|None|25": {
153
+ "f1_macro,all": 0.6629793868028777,
154
  "acc,all": 0.7059701492537314,
155
+ "main_score": 0.6629793868028777
156
  }
157
  },
158
  "config_tasks": {