{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9950248756218906,
  "eval_steps": 35,
  "global_step": 175,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.028429282160625444,
      "grad_norm": 6.4533820152282715,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 0.9693,
      "step": 5
    },
    {
      "epoch": 0.05685856432125089,
      "grad_norm": 4.657992839813232,
      "learning_rate": 2.857142857142857e-05,
      "loss": 0.7795,
      "step": 10
    },
    {
      "epoch": 0.08528784648187633,
      "grad_norm": 2.398066997528076,
      "learning_rate": 4.2857142857142856e-05,
      "loss": 0.4535,
      "step": 15
    },
    {
      "epoch": 0.11371712864250177,
      "grad_norm": 1.7500923871994019,
      "learning_rate": 5.714285714285714e-05,
      "loss": 0.4519,
      "step": 20
    },
    {
      "epoch": 0.14214641080312723,
      "grad_norm": 1.989443063735962,
      "learning_rate": 7.142857142857143e-05,
      "loss": 0.4128,
      "step": 25
    },
    {
      "epoch": 0.17057569296375266,
      "grad_norm": 1.1362711191177368,
      "learning_rate": 8.571428571428571e-05,
      "loss": 0.3446,
      "step": 30
    },
    {
      "epoch": 0.19900497512437812,
      "grad_norm": 1.1770528554916382,
      "learning_rate": 0.0001,
      "loss": 0.3253,
      "step": 35
    },
    {
      "epoch": 0.19900497512437812,
      "eval_loss": 0.28751906752586365,
      "eval_runtime": 239.7962,
      "eval_samples_per_second": 10.426,
      "eval_steps_per_second": 10.426,
      "step": 35
    },
    {
      "epoch": 0.22743425728500355,
      "grad_norm": 1.1288448572158813,
      "learning_rate": 9.993784606094612e-05,
      "loss": 0.3065,
      "step": 40
    },
    {
      "epoch": 0.255863539445629,
      "grad_norm": 1.0276695489883423,
      "learning_rate": 9.975153876827008e-05,
      "loss": 0.302,
      "step": 45
    },
    {
      "epoch": 0.28429282160625446,
      "grad_norm": 1.2982385158538818,
      "learning_rate": 9.944154131125642e-05,
      "loss": 0.307,
      "step": 50
    },
    {
      "epoch": 0.31272210376687987,
      "grad_norm": 1.4258345365524292,
      "learning_rate": 9.900862439242719e-05,
      "loss": 0.2908,
      "step": 55
    },
    {
      "epoch": 0.3411513859275053,
      "grad_norm": 0.7898867726325989,
      "learning_rate": 9.84538643114539e-05,
      "loss": 0.3112,
      "step": 60
    },
    {
      "epoch": 0.3695806680881308,
      "grad_norm": 0.9717791676521301,
      "learning_rate": 9.777864028930705e-05,
      "loss": 0.2596,
      "step": 65
    },
    {
      "epoch": 0.39800995024875624,
      "grad_norm": 1.0619926452636719,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.2868,
      "step": 70
    },
    {
      "epoch": 0.39800995024875624,
      "eval_loss": 0.2600123882293701,
      "eval_runtime": 240.5378,
      "eval_samples_per_second": 10.393,
      "eval_steps_per_second": 10.393,
      "step": 70
    },
    {
      "epoch": 0.42643923240938164,
      "grad_norm": 0.8634427189826965,
      "learning_rate": 9.607381059352038e-05,
      "loss": 0.2763,
      "step": 75
    },
    {
      "epoch": 0.4548685145700071,
      "grad_norm": 1.2894893884658813,
      "learning_rate": 9.504844339512095e-05,
      "loss": 0.254,
      "step": 80
    },
    {
      "epoch": 0.48329779673063256,
      "grad_norm": 1.4364824295043945,
      "learning_rate": 9.391107866851143e-05,
      "loss": 0.2834,
      "step": 85
    },
    {
      "epoch": 0.511727078891258,
      "grad_norm": 0.9127787947654724,
      "learning_rate": 9.266454408160779e-05,
      "loss": 0.2509,
      "step": 90
    },
    {
      "epoch": 0.5401563610518835,
      "grad_norm": 0.9939984083175659,
      "learning_rate": 9.131193871579975e-05,
      "loss": 0.266,
      "step": 95
    },
    {
      "epoch": 0.5685856432125089,
      "grad_norm": 1.184008002281189,
      "learning_rate": 8.985662536114613e-05,
      "loss": 0.2562,
      "step": 100
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 0.6923903226852417,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.2583,
      "step": 105
    },
    {
      "epoch": 0.5970149253731343,
      "eval_loss": 0.25079870223999023,
      "eval_runtime": 240.544,
      "eval_samples_per_second": 10.393,
      "eval_steps_per_second": 10.393,
      "step": 105
    },
    {
      "epoch": 0.6254442075337597,
      "grad_norm": 0.8162419199943542,
      "learning_rate": 8.665259359149132e-05,
      "loss": 0.2529,
      "step": 110
    },
    {
      "epoch": 0.6538734896943852,
      "grad_norm": 0.7928385734558105,
      "learning_rate": 8.491184090430364e-05,
      "loss": 0.2482,
      "step": 115
    },
    {
      "epoch": 0.6823027718550106,
      "grad_norm": 0.8485226035118103,
      "learning_rate": 8.308429187984297e-05,
      "loss": 0.244,
      "step": 120
    },
    {
      "epoch": 0.7107320540156361,
      "grad_norm": 0.8197255730628967,
      "learning_rate": 8.117449009293668e-05,
      "loss": 0.2453,
      "step": 125
    },
    {
      "epoch": 0.7391613361762616,
      "grad_norm": 0.9954875707626343,
      "learning_rate": 7.91871836117395e-05,
      "loss": 0.2451,
      "step": 130
    },
    {
      "epoch": 0.767590618336887,
      "grad_norm": 1.4009358882904053,
      "learning_rate": 7.712731319328798e-05,
      "loss": 0.2749,
      "step": 135
    },
    {
      "epoch": 0.7960199004975125,
      "grad_norm": 0.8014578819274902,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.2559,
      "step": 140
    },
    {
      "epoch": 0.7960199004975125,
      "eval_loss": 0.2278721183538437,
      "eval_runtime": 239.2855,
      "eval_samples_per_second": 10.448,
      "eval_steps_per_second": 10.448,
      "step": 140
    },
    {
      "epoch": 0.8244491826581379,
      "grad_norm": 1.1141350269317627,
      "learning_rate": 7.281053286765815e-05,
      "loss": 0.2483,
      "step": 145
    },
    {
      "epoch": 0.8528784648187633,
      "grad_norm": 0.9138433933258057,
      "learning_rate": 7.056435515653059e-05,
      "loss": 0.2666,
      "step": 150
    },
    {
      "epoch": 0.8813077469793887,
      "grad_norm": 0.5915958285331726,
      "learning_rate": 6.826705121831976e-05,
      "loss": 0.2576,
      "step": 155
    },
    {
      "epoch": 0.9097370291400142,
      "grad_norm": 0.724046528339386,
      "learning_rate": 6.592433251258423e-05,
      "loss": 0.2552,
      "step": 160
    },
    {
      "epoch": 0.9381663113006397,
      "grad_norm": 1.355237364768982,
      "learning_rate": 6.354202340715026e-05,
      "loss": 0.2287,
      "step": 165
    },
    {
      "epoch": 0.9665955934612651,
      "grad_norm": 1.1470948457717896,
      "learning_rate": 6.112604669781572e-05,
      "loss": 0.2469,
      "step": 170
    },
    {
      "epoch": 0.9950248756218906,
      "grad_norm": 0.7694109678268433,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.2516,
      "step": 175
    },
    {
      "epoch": 0.9950248756218906,
      "eval_loss": 0.22209325432777405,
      "eval_runtime": 239.4751,
      "eval_samples_per_second": 10.439,
      "eval_steps_per_second": 10.439,
      "step": 175
    }
  ],
  "logging_steps": 5,
  "max_steps": 350,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 35,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.264994298415022e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}