|
{
  "results": {
    "assin2_rte": {
      "f1_macro,all": 0.8843035078991233,
      "acc,all": 0.8843954248366013,
      "alias": "assin2_rte"
    },
    "assin2_sts": {
      "pearson,all": 0.711905064393444,
      "mse,all": 0.6730964052287581,
      "alias": "assin2_sts"
    },
    "bluex": {
      "acc,all": 0.48956884561891517,
      "acc,exam_id__USP_2019": 0.35,
      "acc,exam_id__USP_2022": 0.5306122448979592,
      "acc,exam_id__UNICAMP_2023": 0.4418604651162791,
      "acc,exam_id__USP_2023": 0.6136363636363636,
      "acc,exam_id__USP_2020": 0.42857142857142855,
      "acc,exam_id__UNICAMP_2024": 0.5555555555555556,
      "acc,exam_id__USP_2021": 0.5,
      "acc,exam_id__UNICAMP_2021_1": 0.34782608695652173,
      "acc,exam_id__UNICAMP_2021_2": 0.5098039215686274,
      "acc,exam_id__UNICAMP_2022": 0.48717948717948717,
      "acc,exam_id__USP_2018": 0.5,
      "acc,exam_id__USP_2024": 0.6341463414634146,
      "acc,exam_id__UNICAMP_2019": 0.56,
      "acc,exam_id__UNICAMP_2020": 0.4727272727272727,
      "acc,exam_id__UNICAMP_2018": 0.42592592592592593,
      "alias": "bluex"
    },
    "enem_challenge": {
      "alias": "enem",
      "acc,all": 0.5780265920223933,
      "acc,exam_id__2016": 0.5041322314049587,
      "acc,exam_id__2013": 0.6388888888888888,
      "acc,exam_id__2011": 0.5897435897435898,
      "acc,exam_id__2009": 0.5652173913043478,
      "acc,exam_id__2016_2": 0.5934959349593496,
      "acc,exam_id__2022": 0.5864661654135338,
      "acc,exam_id__2014": 0.5596330275229358,
      "acc,exam_id__2015": 0.5546218487394958,
      "acc,exam_id__2017": 0.6293103448275862,
      "acc,exam_id__2010": 0.5982905982905983,
      "acc,exam_id__2012": 0.5948275862068966,
      "acc,exam_id__2023": 0.5333333333333333
    },
    "faquad_nli": {
      "f1_macro,all": 0.7423468521014304,
      "acc,all": 0.8384615384615385,
      "alias": "faquad_nli"
    },
    "hatebr_offensive": {
      "alias": "hatebr_offensive_binary",
      "f1_macro,all": 0.6791294642857143,
      "acc,all": 0.7042857142857143
    },
    "oab_exams": {
      "acc,all": 0.40364464692482915,
      "acc,exam_id__2011-05": 0.3875,
      "acc,exam_id__2016-21": 0.45,
      "acc,exam_id__2017-22": 0.475,
      "acc,exam_id__2013-11": 0.3875,
      "acc,exam_id__2018-25": 0.4,
      "acc,exam_id__2012-09": 0.37662337662337664,
      "acc,exam_id__2014-13": 0.3,
      "acc,exam_id__2015-17": 0.47435897435897434,
      "acc,exam_id__2017-24": 0.4125,
      "acc,exam_id__2014-15": 0.5,
      "acc,exam_id__2017-23": 0.4125,
      "acc,exam_id__2011-03": 0.31313131313131315,
      "acc,exam_id__2012-07": 0.4125,
      "acc,exam_id__2012-08": 0.3,
      "acc,exam_id__2012-06a": 0.375,
      "acc,exam_id__2010-01": 0.35294117647058826,
      "acc,exam_id__2015-16": 0.375,
      "acc,exam_id__2016-19": 0.5,
      "acc,exam_id__2015-18": 0.4625,
      "acc,exam_id__2012-06": 0.4125,
      "acc,exam_id__2014-14": 0.425,
      "acc,exam_id__2013-10": 0.4,
      "acc,exam_id__2013-12": 0.45,
      "acc,exam_id__2016-20": 0.4625,
      "acc,exam_id__2011-04": 0.35,
      "acc,exam_id__2010-02": 0.46,
      "acc,exam_id__2016-20a": 0.2875,
      "alias": "oab_exams"
    },
    "portuguese_hate_speech": {
      "alias": "portuguese_hate_speech_binary",
      "f1_macro,all": 0.6532273087132678,
      "acc,all": 0.7532314923619271
    },
    "tweetsentbr": {
      "f1_macro,all": 0.6260269631440111,
      "acc,all": 0.6592039800995025,
      "alias": "tweetsentbr"
    }
  },
  "configs": {
    "assin2_rte": {
      "task": "assin2_rte",
      "group": ["pt_benchmark", "assin2"],
      "dataset_path": "assin2",
      "test_split": "test",
      "fewshot_split": "train",
      "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
      "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
      "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": [1, 3251, 2, 3252, 3, 4, 5, 6, 3253, 7, 3254, 3255, 3256, 8, 9, 10, 3257, 11, 3258, 12, 13, 14, 15, 3259, 3260, 3261, 3262, 3263, 16, 17, 3264, 18, 3265, 3266, 3267, 19, 20, 3268, 3269, 21, 3270, 3271, 22, 3272, 3273, 23, 3274, 24, 25, 3275],
          "id_column": "sentence_pair_id"
        }
      },
      "num_fewshot": 15,
      "metric_list": [
        { "metric": "f1_macro", "aggregation": "f1_macro", "higher_is_better": true },
        { "metric": "acc", "aggregation": "acc", "higher_is_better": true }
      ],
      "output_type": "generate_until",
      "generation_kwargs": {
        "max_gen_toks": 32,
        "do_sample": false,
        "temperature": 0.0,
        "top_k": null,
        "top_p": null,
        "until": ["\n\n"]
      },
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            { "function": "find_similar_label", "labels": ["Sim", "Não"] },
            { "function": "take_first" }
          ]
        }
      ],
      "should_decontaminate": false,
      "metadata": { "version": 1.1 }
    },
    "assin2_sts": {
      "task": "assin2_sts",
      "group": ["pt_benchmark", "assin2"],
      "dataset_path": "assin2",
      "test_split": "test",
      "fewshot_split": "train",
      "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
      "doc_to_target": "<function assin2_float_to_pt_str at 0x7fcc9bba2160>",
      "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": [1, 3251, 2, 3252, 3, 4, 5, 6, 3253, 7, 3254, 3255, 3256, 8, 9, 10, 3257, 11, 3258, 12, 13, 14, 15, 3259, 3260, 3261, 3262, 3263, 16, 17, 3264, 18, 3265, 3266, 3267, 19, 20, 3268, 3269, 21, 3270, 3271, 22, 3272, 3273, 23, 3274, 24, 25, 3275],
          "id_column": "sentence_pair_id"
        }
      },
      "num_fewshot": 15,
      "metric_list": [
        { "metric": "pearson", "aggregation": "pearsonr", "higher_is_better": true },
        { "metric": "mse", "aggregation": "mean_squared_error", "higher_is_better": false }
      ],
      "output_type": "generate_until",
      "generation_kwargs": {
        "max_gen_toks": 32,
        "do_sample": false,
        "temperature": 0.0,
        "top_k": null,
        "top_p": null,
        "until": ["\n\n"]
      },
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {
              "function": "number_filter",
              "type": "float",
              "range_min": 1.0,
              "range_max": 5.0,
              "on_outside_range": "clip",
              "fallback": 5.0
            },
            { "function": "take_first" }
          ]
        }
      ],
      "should_decontaminate": false,
      "metadata": { "version": 1.1 }
    },
    "bluex": {
      "task": "bluex",
      "group": ["pt_benchmark", "vestibular"],
      "dataset_path": "eduagarcia-temp/BLUEX_without_images",
      "test_split": "train",
      "fewshot_split": "train",
      "doc_to_text": "<function enem_doc_to_text at 0x7fcc9bba1b20>",
      "doc_to_target": "{{answerKey}}",
      "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": ["USP_2018_3", "UNICAMP_2018_2", "USP_2018_35", "UNICAMP_2018_16", "USP_2018_89"],
          "id_column": "id",
          "exclude_from_task": true
        }
      },
      "num_fewshot": 3,
      "metric_list": [
        { "metric": "acc", "aggregation": "acc", "higher_is_better": true }
      ],
      "output_type": "generate_until",
      "generation_kwargs": {
        "max_gen_toks": 32,
        "do_sample": false,
        "temperature": 0.0,
        "top_k": null,
        "top_p": null,
        "until": ["\n\n"]
      },
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            { "function": "normalize_spaces" },
            { "function": "remove_accents" },
            {
              "function": "find_choices",
              "choices": ["A", "B", "C", "D", "E"],
              "regex_patterns": [
                "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
                "\\b([ABCDE])\\.",
                "\\b([ABCDE]) ?[.):-]",
                "\\b([ABCDE])$",
                "\\b([ABCDE])\\b"
              ]
            },
            { "function": "take_first" }
          ],
          "group_by": { "column": "exam_id" }
        }
      ],
      "should_decontaminate": true,
      "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fcc9bba1da0>",
      "metadata": { "version": 1.1 }
    },
    "enem_challenge": {
      "task": "enem_challenge",
      "task_alias": "enem",
      "group": ["pt_benchmark", "vestibular"],
      "dataset_path": "eduagarcia/enem_challenge",
      "test_split": "train",
      "fewshot_split": "train",
      "doc_to_text": "<function enem_doc_to_text at 0x7fcc9bba2340>",
      "doc_to_target": "{{answerKey}}",
      "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": ["2022_21", "2022_88", "2022_143"],
          "id_column": "id",
          "exclude_from_task": true
        }
      },
      "num_fewshot": 3,
      "metric_list": [
        { "metric": "acc", "aggregation": "acc", "higher_is_better": true }
      ],
      "output_type": "generate_until",
      "generation_kwargs": {
        "max_gen_toks": 32,
        "do_sample": false,
        "temperature": 0.0,
        "top_k": null,
        "top_p": null,
        "until": ["\n\n"]
      },
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            { "function": "normalize_spaces" },
            { "function": "remove_accents" },
            {
              "function": "find_choices",
              "choices": ["A", "B", "C", "D", "E"],
              "regex_patterns": [
                "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b",
                "\\b([ABCDE])\\.",
                "\\b([ABCDE]) ?[.):-]",
                "\\b([ABCDE])$",
                "\\b([ABCDE])\\b"
              ]
            },
            { "function": "take_first" }
          ],
          "group_by": { "column": "exam_id" }
        }
      ],
      "should_decontaminate": true,
      "doc_to_decontamination_query": "<function enem_doc_to_text at 0x7fcc9bba25c0>",
      "metadata": { "version": 1.1 }
    },
    "faquad_nli": {
      "task": "faquad_nli",
      "group": ["pt_benchmark"],
      "dataset_path": "ruanchaves/faquad-nli",
      "test_split": "test",
      "fewshot_split": "train",
      "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
      "doc_to_target": "{{['Não', 'Sim'][label]}}",
      "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "first_n",
        "sampler_config": {
          "fewshot_indices": [1893, 949, 663, 105, 1169, 2910, 2227, 2813, 974, 558, 1503, 1958, 2918, 601, 1560, 984, 2388, 995, 2233, 1982, 165, 2788, 1312, 2285, 522, 1113, 1670, 323, 236, 1263, 1562, 2519, 1049, 432, 1167, 1394, 2022, 2551, 2194, 2187, 2282, 2816, 108, 301, 1185, 1315, 1420, 2436, 2322, 766]
        }
      },
      "num_fewshot": 15,
      "metric_list": [
        { "metric": "f1_macro", "aggregation": "f1_macro", "higher_is_better": true },
        { "metric": "acc", "aggregation": "acc", "higher_is_better": true }
      ],
      "output_type": "generate_until",
      "generation_kwargs": {
        "max_gen_toks": 32,
        "do_sample": false,
        "temperature": 0.0,
        "top_k": null,
        "top_p": null,
        "until": ["\n\n"]
      },
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            { "function": "find_similar_label", "labels": ["Sim", "Não"] },
            { "function": "take_first" }
          ]
        }
      ],
      "should_decontaminate": false,
      "metadata": { "version": 1.1 }
    },
    "hatebr_offensive": {
      "task": "hatebr_offensive",
      "task_alias": "hatebr_offensive_binary",
      "group": ["pt_benchmark"],
      "dataset_path": "eduagarcia/portuguese_benchmark",
      "dataset_name": "HateBR_offensive_binary",
      "test_split": "test",
      "fewshot_split": "train",
      "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
      "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
      "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": [48, 44, 36, 20, 3511, 88, 3555, 16, 56, 3535, 60, 40, 3527, 4, 76, 3579, 3523, 3551, 68, 3503, 84, 3539, 64, 3599, 80, 3563, 3559, 3543, 3547, 3587, 3595, 3575, 3567, 3591, 24, 96, 92, 3507, 52, 72, 8, 3571, 3515, 3519, 3531, 28, 32, 0, 12, 3583],
          "id_column": "idx"
        }
      },
      "num_fewshot": 25,
      "metric_list": [
        { "metric": "f1_macro", "aggregation": "f1_macro", "higher_is_better": true },
        { "metric": "acc", "aggregation": "acc", "higher_is_better": true }
      ],
      "output_type": "generate_until",
      "generation_kwargs": {
        "max_gen_toks": 32,
        "do_sample": false,
        "temperature": 0.0,
        "top_k": null,
        "top_p": null,
        "until": ["\n\n"]
      },
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            { "function": "find_similar_label", "labels": ["Sim", "Não"] },
            { "function": "take_first" }
          ]
        }
      ],
      "should_decontaminate": false,
      "metadata": { "version": 1.0 }
    },
    "oab_exams": {
      "task": "oab_exams",
      "group": ["legal_benchmark", "pt_benchmark"],
      "dataset_path": "eduagarcia/oab_exams",
      "test_split": "train",
      "fewshot_split": "train",
      "doc_to_text": "<function doc_to_text at 0x7fcc9bba14e0>",
      "doc_to_target": "{{answerKey}}",
      "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": ["2010-01_1", "2010-01_11", "2010-01_13", "2010-01_23", "2010-01_26", "2010-01_28", "2010-01_38", "2010-01_48", "2010-01_58", "2010-01_68", "2010-01_76", "2010-01_83", "2010-01_85", "2010-01_91", "2010-01_99"],
          "id_column": "id",
          "exclude_from_task": true
        }
      },
      "num_fewshot": 3,
      "metric_list": [
        { "metric": "acc", "aggregation": "acc", "higher_is_better": true }
      ],
      "output_type": "generate_until",
      "generation_kwargs": {
        "max_gen_toks": 32,
        "do_sample": false,
        "temperature": 0.0,
        "top_k": null,
        "top_p": null,
        "until": ["\n\n"]
      },
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            { "function": "normalize_spaces" },
            { "function": "remove_accents" },
            {
              "function": "find_choices",
              "choices": ["A", "B", "C", "D"],
              "regex_patterns": [
                "(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b",
                "\\b([ABCD])\\.",
                "\\b([ABCD]) ?[.):-]",
                "\\b([ABCD])$",
                "\\b([ABCD])\\b"
              ]
            },
            { "function": "take_first" }
          ],
          "group_by": { "column": "exam_id" }
        }
      ],
      "should_decontaminate": true,
      "doc_to_decontamination_query": "<function doc_to_text at 0x7fcc9bba1760>",
      "metadata": { "version": 1.5 }
    },
    "portuguese_hate_speech": {
      "task": "portuguese_hate_speech",
      "task_alias": "portuguese_hate_speech_binary",
      "group": ["pt_benchmark"],
      "dataset_path": "eduagarcia/portuguese_benchmark",
      "dataset_name": "Portuguese_Hate_Speech_binary",
      "test_split": "test",
      "fewshot_split": "train",
      "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
      "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
      "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": [52, 50, 39, 28, 3, 105, 22, 25, 60, 11, 66, 41, 9, 4, 91, 42, 7, 20, 76, 1, 104, 13, 67, 54, 97, 27, 24, 14, 16, 48, 53, 40, 34, 49, 32, 119, 114, 2, 58, 83, 18, 36, 5, 6, 10, 35, 38, 0, 21, 46],
          "id_column": "idx"
        }
      },
      "num_fewshot": 25,
      "metric_list": [
        { "metric": "f1_macro", "aggregation": "f1_macro", "higher_is_better": true },
        { "metric": "acc", "aggregation": "acc", "higher_is_better": true }
      ],
      "output_type": "generate_until",
      "generation_kwargs": {
        "max_gen_toks": 32,
        "do_sample": false,
        "temperature": 0.0,
        "top_k": null,
        "top_p": null,
        "until": ["\n\n"]
      },
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            { "function": "find_similar_label", "labels": ["Sim", "Não"] },
            { "function": "take_first" }
          ]
        }
      ],
      "should_decontaminate": false,
      "metadata": { "version": 1.0 }
    },
    "tweetsentbr": {
      "task": "tweetsentbr",
      "group": ["pt_benchmark"],
      "dataset_path": "eduagarcia/tweetsentbr_fewshot",
      "test_split": "test",
      "fewshot_split": "train",
      "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
      "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
      "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": { "sampler": "first_n" },
      "num_fewshot": 25,
      "metric_list": [
        { "metric": "f1_macro", "aggregation": "f1_macro", "higher_is_better": true },
        { "metric": "acc", "aggregation": "acc", "higher_is_better": true }
      ],
      "output_type": "generate_until",
      "generation_kwargs": {
        "max_gen_toks": 32,
        "do_sample": false,
        "temperature": 0.0,
        "top_k": null,
        "top_p": null,
        "until": ["\n\n"]
      },
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            { "function": "find_similar_label", "labels": ["Positivo", "Neutro", "Negativo"] },
            { "function": "take_first" }
          ]
        }
      ],
      "should_decontaminate": false,
      "metadata": { "version": 1.0 }
    }
  },
  "versions": {
    "assin2_rte": 1.1,
    "assin2_sts": 1.1,
    "bluex": 1.1,
    "enem_challenge": 1.1,
    "faquad_nli": 1.1,
    "hatebr_offensive": 1.0,
    "oab_exams": 1.5,
    "portuguese_hate_speech": 1.0,
    "tweetsentbr": 1.0
  },
  "n-shot": {
    "assin2_rte": 15,
    "assin2_sts": 15,
    "bluex": 3,
    "enem_challenge": 3,
    "faquad_nli": 15,
    "hatebr_offensive": 25,
    "oab_exams": 3,
    "portuguese_hate_speech": 25,
    "tweetsentbr": 25
  },
  "model_meta": {
    "truncated": 3,
    "non_truncated": 14147,
    "padded": 0,
    "non_padded": 14150,
    "fewshots_truncated": 3,
    "has_chat_template": true,
    "chat_type": "user_assistant",
    "n_gpus": 1,
    "accelerate_num_process": null,
    "model_sha": "b02dec0b3cbf271023798c6bf5525db8f8ea46ef",
    "model_dtype": "torch.bfloat16",
    "model_memory_footprint": 15020343296,
    "model_num_parameters": 7241732096,
    "model_is_loaded_in_4bit": null,
    "model_is_loaded_in_8bit": null,
    "model_is_quantized": null,
    "model_device": "cuda:0",
    "batch_size": 16,
    "max_length": 2560,
    "max_ctx_length": 2528,
    "max_gen_toks": 32
  },
  "task_model_meta": {
    "assin2_rte": {
      "sample_size": 2448,
      "truncated": 0,
      "non_truncated": 2448,
      "padded": 0,
      "non_padded": 2448,
      "fewshots_truncated": 0,
      "mean_seq_length": 1466.7455065359477,
      "min_seq_length": 1443,
      "max_seq_length": 1533,
      "max_ctx_length": 2528,
      "max_gen_toks": 32,
      "mean_original_fewshots_size": 15.0,
      "mean_effective_fewshot_size": 15.0
    },
    "assin2_sts": {
      "sample_size": 2448,
      "truncated": 0,
      "non_truncated": 2448,
      "padded": 0,
      "non_padded": 2448,
      "fewshots_truncated": 0,
      "mean_seq_length": 1690.7455065359477,
      "min_seq_length": 1667,
      "max_seq_length": 1757,
      "max_ctx_length": 2528,
      "max_gen_toks": 32,
      "mean_original_fewshots_size": 15.0,
      "mean_effective_fewshot_size": 15.0
    },
    "bluex": {
      "sample_size": 719,
      "truncated": 1,
      "non_truncated": 718,
      "padded": 0,
      "non_padded": 719,
      "fewshots_truncated": 1,
      "mean_seq_length": 1747.9262865090404,
      "min_seq_length": 1371,
      "max_seq_length": 2548,
      "max_ctx_length": 2528,
      "max_gen_toks": 32,
      "mean_original_fewshots_size": 3.0,
      "mean_effective_fewshot_size": 2.998609179415855
    },
    "enem_challenge": {
      "sample_size": 1429,
      "truncated": 2,
      "non_truncated": 1427,
      "padded": 0,
      "non_padded": 1429,
      "fewshots_truncated": 2,
      "mean_seq_length": 1648.039188243527,
      "min_seq_length": 1382,
      "max_seq_length": 2646,
      "max_ctx_length": 2528,
      "max_gen_toks": 32,
      "mean_original_fewshots_size": 3.0,
      "mean_effective_fewshot_size": 2.998600419874038
    },
    "faquad_nli": {
      "sample_size": 650,
      "truncated": 0,
      "non_truncated": 650,
      "padded": 0,
      "non_padded": 650,
      "fewshots_truncated": 0,
      "mean_seq_length": 1706.9876923076922,
      "min_seq_length": 1651,
      "max_seq_length": 1827,
      "max_ctx_length": 2528,
      "max_gen_toks": 32,
      "mean_original_fewshots_size": 15.0,
      "mean_effective_fewshot_size": 15.0
    },
    "hatebr_offensive": {
      "sample_size": 1400,
      "truncated": 0,
      "non_truncated": 1400,
      "padded": 0,
      "non_padded": 1400,
      "fewshots_truncated": 0,
      "mean_seq_length": 1487.3878571428572,
      "min_seq_length": 1464,
      "max_seq_length": 1738,
      "max_ctx_length": 2528,
      "max_gen_toks": 32,
      "mean_original_fewshots_size": 25.0,
      "mean_effective_fewshot_size": 25.0
    },
    "oab_exams": {
      "sample_size": 2195,
      "truncated": 0,
      "non_truncated": 2195,
      "padded": 0,
      "non_padded": 2195,
      "fewshots_truncated": 0,
      "mean_seq_length": 1393.764464692483,
      "min_seq_length": 1127,
      "max_seq_length": 1896,
      "max_ctx_length": 2528,
      "max_gen_toks": 32,
      "mean_original_fewshots_size": 3.0,
      "mean_effective_fewshot_size": 3.0
    },
    "portuguese_hate_speech": {
      "sample_size": 851,
      "truncated": 0,
      "non_truncated": 851,
      "padded": 0,
      "non_padded": 851,
      "fewshots_truncated": 0,
      "mean_seq_length": 1988.3360752056403,
      "min_seq_length": 1953,
      "max_seq_length": 2027,
      "max_ctx_length": 2528,
      "max_gen_toks": 32,
      "mean_original_fewshots_size": 25.0,
      "mean_effective_fewshot_size": 25.0
    },
    "tweetsentbr": {
      "sample_size": 2010,
      "truncated": 0,
      "non_truncated": 2010,
      "padded": 0,
      "non_padded": 2010,
      "fewshots_truncated": 0,
      "mean_seq_length": 1734.2492537313433,
      "min_seq_length": 1713,
      "max_seq_length": 1829,
      "max_ctx_length": 2528,
      "max_gen_toks": 32,
      "mean_original_fewshots_size": 25.0,
      "mean_effective_fewshot_size": 25.0
    }
  },
  "config": {
    "model": "huggingface",
    "model_args": "pretrained=TIGER-Lab/MAmmoTH2-7B-Plus,dtype=bfloat16,device=cuda:0,revision=main,trust_remote_code=True,starting_max_length=2560",
    "batch_size": "auto",
    "batch_sizes": [],
    "device": null,
    "use_cache": null,
    "limit": [null, null, null, null, null, null, null, null, null],
    "bootstrap_iters": 0,
    "gen_kwargs": null
  },
  "git_hash": "51e0e5e"
}