{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.8021265092809515,
  "eval_steps": 500,
  "global_step": 20000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04505316273202379,
      "grad_norm": 4495.31298828125,
      "learning_rate": 1.25e-05,
      "loss": 876.4691,
      "step": 500
    },
    {
      "epoch": 0.09010632546404758,
      "grad_norm": 7690.41552734375,
      "learning_rate": 2.5e-05,
      "loss": 552.9783,
      "step": 1000
    },
    {
      "epoch": 0.13515948819607138,
      "grad_norm": 3831.229736328125,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 508.4013,
      "step": 1500
    },
    {
      "epoch": 0.18021265092809516,
      "grad_norm": 2652.883056640625,
      "learning_rate": 5e-05,
      "loss": 493.1244,
      "step": 2000
    },
    {
      "epoch": 0.22526581366011894,
      "grad_norm": 2329.73388671875,
      "learning_rate": 4.990486745229364e-05,
      "loss": 471.2945,
      "step": 2500
    },
    {
      "epoch": 0.27031897639214275,
      "grad_norm": 2660.374267578125,
      "learning_rate": 4.962019382530521e-05,
      "loss": 462.8009,
      "step": 3000
    },
    {
      "epoch": 0.3153721391241665,
      "grad_norm": 2498.203857421875,
      "learning_rate": 4.914814565722671e-05,
      "loss": 446.8442,
      "step": 3500
    },
    {
      "epoch": 0.3604253018561903,
      "grad_norm": 3495.0498046875,
      "learning_rate": 4.849231551964771e-05,
      "loss": 449.2647,
      "step": 4000
    },
    {
      "epoch": 0.40547846458821407,
      "grad_norm": 2064.2861328125,
      "learning_rate": 4.765769467591625e-05,
      "loss": 438.1682,
      "step": 4500
    },
    {
      "epoch": 0.4505316273202379,
      "grad_norm": 2323.6640625,
      "learning_rate": 4.665063509461097e-05,
      "loss": 434.8857,
      "step": 5000
    },
    {
      "epoch": 0.4955847900522617,
      "grad_norm": 3712.401611328125,
      "learning_rate": 4.54788011072248e-05,
      "loss": 426.7452,
      "step": 5500
    },
    {
      "epoch": 0.5406379527842855,
      "grad_norm": 3010.898193359375,
      "learning_rate": 4.415111107797445e-05,
      "loss": 425.659,
      "step": 6000
    },
    {
      "epoch": 0.5856911155163093,
      "grad_norm": 2957.86572265625,
      "learning_rate": 4.267766952966369e-05,
      "loss": 414.4347,
      "step": 6500
    },
    {
      "epoch": 0.630744278248333,
      "grad_norm": 5558.51171875,
      "learning_rate": 4.1069690242163484e-05,
      "loss": 414.9519,
      "step": 7000
    },
    {
      "epoch": 0.6757974409803568,
      "grad_norm": 2855.100341796875,
      "learning_rate": 3.933941090877615e-05,
      "loss": 400.0895,
      "step": 7500
    },
    {
      "epoch": 0.7208506037123806,
      "grad_norm": 6043.2392578125,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 406.6636,
      "step": 8000
    },
    {
      "epoch": 0.7659037664444044,
      "grad_norm": 2479.6162109375,
      "learning_rate": 3.556545654351749e-05,
      "loss": 396.584,
      "step": 8500
    },
    {
      "epoch": 0.8109569291764281,
      "grad_norm": 2344.032958984375,
      "learning_rate": 3.355050358314172e-05,
      "loss": 394.9724,
      "step": 9000
    },
    {
      "epoch": 0.856010091908452,
      "grad_norm": 2779.349853515625,
      "learning_rate": 3.147047612756302e-05,
      "loss": 397.4112,
      "step": 9500
    },
    {
      "epoch": 0.9010632546404758,
      "grad_norm": 2716.18994140625,
      "learning_rate": 2.9341204441673266e-05,
      "loss": 391.9731,
      "step": 10000
    },
    {
      "epoch": 0.9461164173724995,
      "grad_norm": 2225.15673828125,
      "learning_rate": 2.717889356869146e-05,
      "loss": 393.7706,
      "step": 10500
    },
    {
      "epoch": 0.9911695801045234,
      "grad_norm": 1508.1142578125,
      "learning_rate": 2.5e-05,
      "loss": 382.8044,
      "step": 11000
    },
    {
      "epoch": 1.0,
      "eval_loss": 358.9232177734375,
      "eval_runtime": 101.4193,
      "eval_samples_per_second": 97.269,
      "eval_steps_per_second": 12.167,
      "step": 11098
    },
    {
      "epoch": 1.0362227428365471,
      "grad_norm": 2014.6854248046875,
      "learning_rate": 2.2821106431308544e-05,
      "loss": 367.5687,
      "step": 11500
    },
    {
      "epoch": 1.081275905568571,
      "grad_norm": 3775.6591796875,
      "learning_rate": 2.0658795558326743e-05,
      "loss": 359.601,
      "step": 12000
    },
    {
      "epoch": 1.1263290683005946,
      "grad_norm": 5092.62548828125,
      "learning_rate": 1.852952387243698e-05,
      "loss": 363.3713,
      "step": 12500
    },
    {
      "epoch": 1.1713822310326185,
      "grad_norm": 2063.513671875,
      "learning_rate": 1.6449496416858284e-05,
      "loss": 359.5718,
      "step": 13000
    },
    {
      "epoch": 1.2164353937646424,
      "grad_norm": 2899.44091796875,
      "learning_rate": 1.443454345648252e-05,
      "loss": 360.4644,
      "step": 13500
    },
    {
      "epoch": 1.261488556496666,
      "grad_norm": 4710.76123046875,
      "learning_rate": 1.2500000000000006e-05,
      "loss": 361.0999,
      "step": 14000
    },
    {
      "epoch": 1.30654171922869,
      "grad_norm": 2558.06005859375,
      "learning_rate": 1.0660589091223855e-05,
      "loss": 350.5707,
      "step": 14500
    },
    {
      "epoch": 1.3515948819607138,
      "grad_norm": 3802.858642578125,
      "learning_rate": 8.930309757836517e-06,
      "loss": 357.1574,
      "step": 15000
    },
    {
      "epoch": 1.3966480446927374,
      "grad_norm": 2361.53662109375,
      "learning_rate": 7.3223304703363135e-06,
      "loss": 347.2792,
      "step": 15500
    },
    {
      "epoch": 1.4417012074247613,
      "grad_norm": 2280.421630859375,
      "learning_rate": 5.848888922025553e-06,
      "loss": 349.3208,
      "step": 16000
    },
    {
      "epoch": 1.4867543701567851,
      "grad_norm": 2809.066650390625,
      "learning_rate": 4.521198892775203e-06,
      "loss": 353.3406,
      "step": 16500
    },
    {
      "epoch": 1.5318075328888088,
      "grad_norm": 2266.29248046875,
      "learning_rate": 3.3493649053890326e-06,
      "loss": 349.8974,
      "step": 17000
    },
    {
      "epoch": 1.5768606956208324,
      "grad_norm": 2905.138671875,
      "learning_rate": 2.3423053240837515e-06,
      "loss": 342.301,
      "step": 17500
    },
    {
      "epoch": 1.6219138583528565,
      "grad_norm": 2068.38330078125,
      "learning_rate": 1.5076844803522922e-06,
      "loss": 347.1441,
      "step": 18000
    },
    {
      "epoch": 1.6669670210848802,
      "grad_norm": 2976.573486328125,
      "learning_rate": 8.51854342773295e-07,
      "loss": 349.023,
      "step": 18500
    },
    {
      "epoch": 1.7120201838169038,
      "grad_norm": 3883.230712890625,
      "learning_rate": 3.7980617469479953e-07,
      "loss": 356.191,
      "step": 19000
    },
    {
      "epoch": 1.7570733465489279,
      "grad_norm": 1931.0743408203125,
      "learning_rate": 9.513254770636137e-08,
      "loss": 344.1293,
      "step": 19500
    },
    {
      "epoch": 1.8021265092809515,
      "grad_norm": 2924.070556640625,
      "learning_rate": 0.0,
      "loss": 350.3576,
      "step": 20000
    }
  ],
  "logging_steps": 500,
  "max_steps": 20000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 5000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}