{
    "best_metric": null,
    "best_model_checkpoint": null,
    "epoch": 1.975051975051975,
    "global_step": 38000,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
    "log_history": [
        {
            "epoch": 0.03,
            "learning_rate": 4.935031185031185e-05,
            "loss": 1.0477,
            "step": 500
        },
        {
            "epoch": 0.05,
            "learning_rate": 4.8700623700623704e-05,
            "loss": 0.8873,
            "step": 1000
        },
        {
            "epoch": 0.08,
            "learning_rate": 4.805093555093555e-05,
            "loss": 0.8297,
            "step": 1500
        },
        {
            "epoch": 0.1,
            "learning_rate": 4.7401247401247406e-05,
            "loss": 0.831,
            "step": 2000
        },
        {
            "epoch": 0.13,
            "learning_rate": 4.6751559251559254e-05,
            "loss": 0.8251,
            "step": 2500
        },
        {
            "epoch": 0.16,
            "learning_rate": 4.61018711018711e-05,
            "loss": 0.7875,
            "step": 3000
        },
        {
            "epoch": 0.18,
            "learning_rate": 4.5452182952182956e-05,
            "loss": 0.7701,
            "step": 3500
        },
        {
            "epoch": 0.21,
            "learning_rate": 4.48024948024948e-05,
            "loss": 0.7737,
            "step": 4000
        },
        {
            "epoch": 0.23,
            "learning_rate": 4.415280665280666e-05,
            "loss": 0.7025,
            "step": 4500
        },
        {
            "epoch": 0.26,
            "learning_rate": 4.3503118503118505e-05,
            "loss": 0.7269,
            "step": 5000
        },
        {
            "epoch": 0.29,
            "learning_rate": 4.285343035343036e-05,
            "loss": 0.6896,
            "step": 5500
        },
        {
            "epoch": 0.31,
            "learning_rate": 4.220374220374221e-05,
            "loss": 0.7126,
            "step": 6000
        },
        {
            "epoch": 0.34,
            "learning_rate": 4.1554054054054055e-05,
            "loss": 0.7005,
            "step": 6500
        },
        {
            "epoch": 0.36,
            "learning_rate": 4.09043659043659e-05,
            "loss": 0.7013,
            "step": 7000
        },
        {
            "epoch": 0.39,
            "learning_rate": 4.0254677754677757e-05,
            "loss": 0.6839,
            "step": 7500
        },
        {
            "epoch": 0.42,
            "learning_rate": 3.9604989604989604e-05,
            "loss": 0.7126,
            "step": 8000
        },
        {
            "epoch": 0.44,
            "learning_rate": 3.895530145530146e-05,
            "loss": 0.6769,
            "step": 8500
        },
        {
            "epoch": 0.47,
            "learning_rate": 3.8305613305613306e-05,
            "loss": 0.6556,
            "step": 9000
        },
        {
            "epoch": 0.49,
            "learning_rate": 3.765592515592516e-05,
            "loss": 0.6849,
            "step": 9500
        },
        {
            "epoch": 0.52,
            "learning_rate": 3.700623700623701e-05,
            "loss": 0.6754,
            "step": 10000
        },
        {
            "epoch": 0.55,
            "learning_rate": 3.635654885654886e-05,
            "loss": 0.6597,
            "step": 10500
        },
        {
            "epoch": 0.57,
            "learning_rate": 3.57068607068607e-05,
            "loss": 0.6573,
            "step": 11000
        },
        {
            "epoch": 0.6,
            "learning_rate": 3.505717255717256e-05,
            "loss": 0.6657,
            "step": 11500
        },
        {
            "epoch": 0.62,
            "learning_rate": 3.4407484407484405e-05,
            "loss": 0.6788,
            "step": 12000
        },
        {
            "epoch": 0.65,
            "learning_rate": 3.375779625779626e-05,
            "loss": 0.6326,
            "step": 12500
        },
        {
            "epoch": 0.68,
            "learning_rate": 3.310810810810811e-05,
            "loss": 0.659,
            "step": 13000
        },
        {
            "epoch": 0.7,
            "learning_rate": 3.245841995841996e-05,
            "loss": 0.6573,
            "step": 13500
        },
        {
            "epoch": 0.73,
            "learning_rate": 3.180873180873181e-05,
            "loss": 0.6524,
            "step": 14000
        },
        {
            "epoch": 0.75,
            "learning_rate": 3.115904365904366e-05,
            "loss": 0.6454,
            "step": 14500
        },
        {
            "epoch": 0.78,
            "learning_rate": 3.0509355509355507e-05,
            "loss": 0.618,
            "step": 15000
        },
        {
            "epoch": 0.81,
            "learning_rate": 2.9859667359667358e-05,
            "loss": 0.5975,
            "step": 15500
        },
        {
            "epoch": 0.83,
            "learning_rate": 2.920997920997921e-05,
            "loss": 0.6316,
            "step": 16000
        },
        {
            "epoch": 0.86,
            "learning_rate": 2.856029106029106e-05,
            "loss": 0.6224,
            "step": 16500
        },
        {
            "epoch": 0.88,
            "learning_rate": 2.791060291060291e-05,
            "loss": 0.6542,
            "step": 17000
        },
        {
            "epoch": 0.91,
            "learning_rate": 2.7260914760914762e-05,
            "loss": 0.5946,
            "step": 17500
        },
        {
            "epoch": 0.94,
            "learning_rate": 2.6611226611226613e-05,
            "loss": 0.6133,
            "step": 18000
        },
        {
            "epoch": 0.96,
            "learning_rate": 2.5961538461538464e-05,
            "loss": 0.5985,
            "step": 18500
        },
        {
            "epoch": 0.99,
            "learning_rate": 2.531185031185031e-05,
            "loss": 0.5793,
            "step": 19000
        },
        {
            "epoch": 1.0,
            "eval_accuracy": 0.7984781265258789,
            "eval_loss": 0.6602069139480591,
            "eval_runtime": 240.699,
            "eval_samples_per_second": 66.61,
            "eval_steps_per_second": 11.105,
            "step": 19240
        },
        {
            "epoch": 1.01,
            "learning_rate": 2.4662162162162162e-05,
            "loss": 0.4949,
            "step": 19500
        },
        {
            "epoch": 1.04,
            "learning_rate": 2.4012474012474013e-05,
            "loss": 0.4505,
            "step": 20000
        },
        {
            "epoch": 1.07,
            "learning_rate": 2.3362785862785864e-05,
            "loss": 0.4267,
            "step": 20500
        },
        {
            "epoch": 1.09,
            "learning_rate": 2.2713097713097715e-05,
            "loss": 0.4567,
            "step": 21000
        },
        {
            "epoch": 1.12,
            "learning_rate": 2.2063409563409566e-05,
            "loss": 0.4802,
            "step": 21500
        },
        {
            "epoch": 1.14,
            "learning_rate": 2.1413721413721417e-05,
            "loss": 0.4739,
            "step": 22000
        },
        {
            "epoch": 1.17,
            "learning_rate": 2.0764033264033265e-05,
            "loss": 0.4539,
            "step": 22500
        },
        {
            "epoch": 1.2,
            "learning_rate": 2.0114345114345116e-05,
            "loss": 0.4609,
            "step": 23000
        },
        {
            "epoch": 1.22,
            "learning_rate": 1.9464656964656967e-05,
            "loss": 0.48,
            "step": 23500
        },
        {
            "epoch": 1.25,
            "learning_rate": 1.8814968814968818e-05,
            "loss": 0.4691,
            "step": 24000
        },
        {
            "epoch": 1.27,
            "learning_rate": 1.8165280665280665e-05,
            "loss": 0.4632,
            "step": 24500
        },
        {
            "epoch": 1.3,
            "learning_rate": 1.7515592515592516e-05,
            "loss": 0.4545,
            "step": 25000
        },
        {
            "epoch": 1.33,
            "learning_rate": 1.6865904365904367e-05,
            "loss": 0.4476,
            "step": 25500
        },
        {
            "epoch": 1.35,
            "learning_rate": 1.6216216216216218e-05,
            "loss": 0.4576,
            "step": 26000
        },
        {
            "epoch": 1.38,
            "learning_rate": 1.5566528066528066e-05,
            "loss": 0.4254,
            "step": 26500
        },
        {
            "epoch": 1.4,
            "learning_rate": 1.4916839916839917e-05,
            "loss": 0.4471,
            "step": 27000
        },
        {
            "epoch": 1.43,
            "learning_rate": 1.4267151767151768e-05,
            "loss": 0.4375,
            "step": 27500
        },
        {
            "epoch": 1.46,
            "learning_rate": 1.3617463617463619e-05,
            "loss": 0.4515,
            "step": 28000
        },
        {
            "epoch": 1.48,
            "learning_rate": 1.2967775467775468e-05,
            "loss": 0.4254,
            "step": 28500
        },
        {
            "epoch": 1.51,
            "learning_rate": 1.2318087318087319e-05,
            "loss": 0.4337,
            "step": 29000
        },
        {
            "epoch": 1.53,
            "learning_rate": 1.166839916839917e-05,
            "loss": 0.4328,
            "step": 29500
        },
        {
            "epoch": 1.56,
            "learning_rate": 1.101871101871102e-05,
            "loss": 0.443,
            "step": 30000
        },
        {
            "epoch": 1.59,
            "learning_rate": 1.036902286902287e-05,
            "loss": 0.4451,
            "step": 30500
        },
        {
            "epoch": 1.61,
            "learning_rate": 9.719334719334721e-06,
            "loss": 0.4392,
            "step": 31000
        },
        {
            "epoch": 1.64,
            "learning_rate": 9.06964656964657e-06,
            "loss": 0.4457,
            "step": 31500
        },
        {
            "epoch": 1.66,
            "learning_rate": 8.419958419958421e-06,
            "loss": 0.4516,
            "step": 32000
        },
        {
            "epoch": 1.69,
            "learning_rate": 7.77027027027027e-06,
            "loss": 0.4442,
            "step": 32500
        },
        {
            "epoch": 1.72,
            "learning_rate": 7.120582120582121e-06,
            "loss": 0.4234,
            "step": 33000
        },
        {
            "epoch": 1.74,
            "learning_rate": 6.4708939708939705e-06,
            "loss": 0.4371,
            "step": 33500
        },
        {
            "epoch": 1.77,
            "learning_rate": 5.8212058212058215e-06,
            "loss": 0.4367,
            "step": 34000
        },
        {
            "epoch": 1.79,
            "learning_rate": 5.1715176715176724e-06,
            "loss": 0.413,
            "step": 34500
        },
        {
            "epoch": 1.82,
            "learning_rate": 4.5218295218295225e-06,
            "loss": 0.4329,
            "step": 35000
        },
        {
            "epoch": 1.85,
            "learning_rate": 3.872141372141373e-06,
            "loss": 0.4043,
            "step": 35500
        },
        {
            "epoch": 1.87,
            "learning_rate": 3.2224532224532228e-06,
            "loss": 0.4361,
            "step": 36000
        },
        {
            "epoch": 1.9,
            "learning_rate": 2.572765072765073e-06,
            "loss": 0.4218,
            "step": 36500
        },
        {
            "epoch": 1.92,
            "learning_rate": 1.9230769230769234e-06,
            "loss": 0.4011,
            "step": 37000
        },
        {
            "epoch": 1.95,
            "learning_rate": 1.2733887733887735e-06,
            "loss": 0.471,
            "step": 37500
        },
        {
            "epoch": 1.98,
            "learning_rate": 6.237006237006237e-07,
            "loss": 0.399,
            "step": 38000
        }
    ],
    "max_steps": 38480,
    "num_train_epochs": 2,
    "total_flos": 4.935671182808242e+16,
    "trial_name": null,
    "trial_params": null
}