{
  "results": {
    "arc_pt": {
      "acc": 0.2760683760683761,
      "acc_stderr": 0.0130752454127398,
      "acc_norm": 0.30854700854700856,
      "acc_norm_stderr": 0.01350935904916782
    },
    "hellaswag_pt": {
      "acc": 0.37793910499512406,
      "acc_stderr": 0.005047467981098971,
      "acc_norm": 0.47513273377397336,
      "acc_norm_stderr": 0.005198504535035831
    },
    "truthfulqa_pt": {
      "mc1": 0.24746192893401014,
      "mc1_stderr": 0.015382646812261825,
      "mc2": 0.39066264426629793,
      "mc2_stderr": 0.014567795673718856
    }
  },
  "versions": {
    "arc_pt": 0,
    "hellaswag_pt": 1,
    "truthfulqa_pt": 1
  },
  "config": {
    "model": "hf-auto",
    "model_args": "pretrained=/lustre/mlnvme/data/asen_hpc-mula/checkpoints-llama/slurm_job_17782345/step_42164",
    "batch_size": 1,
    "device": "cuda:0",
    "no_cache": false,
    "limit": null,
    "bootstrap_iters": 100000,
    "description_dict": {}
  }
}