{
    "results": {
        "arc_pt": {
            "acc": 0.24444444444444444,
            "acc_stderr": 0.012569442967524474,
            "acc_norm": 0.28888888888888886,
            "acc_norm_stderr": 0.013256439556126792
        },
        "hellaswag_pt": {
            "acc": 0.3326470906923827,
            "acc_stderr": 0.004904738424240269,
            "acc_norm": 0.39408386607433094,
            "acc_norm_stderr": 0.00508682495262388
        },
        "truthfulqa_pt": {
            "mc1": 0.23604060913705585,
            "mc1_stderr": 0.015137046117152837,
            "mc2": 0.42762827969970946,
            "mc2_stderr": 0.014911010832660198
        }
    },
    "versions": {
        "arc_pt": 0,
        "hellaswag_pt": 1,
        "truthfulqa_pt": 1
    },
    "config": {
        "model": "hf-auto",
        "model_args": "pretrained=/lustre/mlnvme/data/asen_hpc-mula/checkpoints-llama/slurm_job_17032104/step_400000",
        "batch_size": 1,
        "device": "cuda:0",
        "no_cache": false,
        "limit": null,
        "bootstrap_iters": 100000,
        "description_dict": {}
    }
}