{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 210.0839983200336, |
|
"global_step": 250000, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 1e-05, |
|
"learning_rate_embeddings": 1e-05, |
|
"loss": 8.3386, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 2e-05, |
|
"learning_rate_embeddings": 2e-05, |
|
"loss": 6.258, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 1.26, |
|
"learning_rate": 3e-05, |
|
"learning_rate_embeddings": 3e-05, |
|
"loss": 5.5397, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 4e-05, |
|
"learning_rate_embeddings": 4e-05, |
|
"loss": 4.9715, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 2.1, |
|
"learning_rate": 5e-05, |
|
"learning_rate_embeddings": 5e-05, |
|
"loss": 4.5681, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 6e-05, |
|
"learning_rate_embeddings": 6e-05, |
|
"loss": 4.2633, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 7.000000000000001e-05, |
|
"learning_rate_embeddings": 7.000000000000001e-05, |
|
"loss": 4.0337, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 8e-05, |
|
"learning_rate_embeddings": 8e-05, |
|
"loss": 3.8598, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 3.78, |
|
"learning_rate": 8.999999999999999e-05, |
|
"learning_rate_embeddings": 8.999999999999999e-05, |
|
"loss": 3.7204, |
|
"step": 4500 |
|
}, |
|
{ |
|
"epoch": 4.2, |
|
"learning_rate": 0.0001, |
|
"learning_rate_embeddings": 0.0001, |
|
"loss": 3.5946, |
|
"step": 5000 |
|
}, |
|
{ |
|
"epoch": 4.62, |
|
"learning_rate": 0.00011, |
|
"learning_rate_embeddings": 0.00011, |
|
"loss": 3.4963, |
|
"step": 5500 |
|
}, |
|
{ |
|
"epoch": 5.04, |
|
"learning_rate": 0.00012, |
|
"learning_rate_embeddings": 0.00012, |
|
"loss": 3.4189, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 5.46, |
|
"learning_rate": 0.00013000000000000002, |
|
"learning_rate_embeddings": 0.00013000000000000002, |
|
"loss": 3.3369, |
|
"step": 6500 |
|
}, |
|
{ |
|
"epoch": 5.88, |
|
"learning_rate": 0.00014000000000000001, |
|
"learning_rate_embeddings": 0.00014000000000000001, |
|
"loss": 3.2837, |
|
"step": 7000 |
|
}, |
|
{ |
|
"epoch": 6.3, |
|
"learning_rate": 0.00015, |
|
"learning_rate_embeddings": 0.00015, |
|
"loss": 3.2359, |
|
"step": 7500 |
|
}, |
|
{ |
|
"epoch": 6.72, |
|
"learning_rate": 0.00016, |
|
"learning_rate_embeddings": 0.00016, |
|
"loss": 3.1786, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 7.14, |
|
"learning_rate": 0.00017, |
|
"learning_rate_embeddings": 0.00017, |
|
"loss": 3.1396, |
|
"step": 8500 |
|
}, |
|
{ |
|
"epoch": 7.56, |
|
"learning_rate": 0.00017999999999999998, |
|
"learning_rate_embeddings": 0.00017999999999999998, |
|
"loss": 3.0931, |
|
"step": 9000 |
|
}, |
|
{ |
|
"epoch": 7.98, |
|
"learning_rate": 0.00019, |
|
"learning_rate_embeddings": 0.00019, |
|
"loss": 3.0626, |
|
"step": 9500 |
|
}, |
|
{ |
|
"epoch": 8.4, |
|
"learning_rate": 0.0002, |
|
"learning_rate_embeddings": 0.0002, |
|
"loss": 3.0226, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 8.82, |
|
"learning_rate": 0.00021, |
|
"learning_rate_embeddings": 0.00021, |
|
"loss": 3.0012, |
|
"step": 10500 |
|
}, |
|
{ |
|
"epoch": 9.24, |
|
"learning_rate": 0.00022, |
|
"learning_rate_embeddings": 0.00022, |
|
"loss": 2.9775, |
|
"step": 11000 |
|
}, |
|
{ |
|
"epoch": 9.66, |
|
"learning_rate": 0.00023, |
|
"learning_rate_embeddings": 0.00023, |
|
"loss": 2.9286, |
|
"step": 11500 |
|
}, |
|
{ |
|
"epoch": 10.08, |
|
"learning_rate": 0.00024, |
|
"learning_rate_embeddings": 0.00024, |
|
"loss": 2.918, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"learning_rate": 0.00025, |
|
"learning_rate_embeddings": 0.00025, |
|
"loss": 2.8907, |
|
"step": 12500 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"eval_loss": 2.839508533477783, |
|
"eval_runtime": 131.8919, |
|
"eval_samples_per_second": 513.36, |
|
"eval_steps_per_second": 4.011, |
|
"step": 12500 |
|
}, |
|
{ |
|
"epoch": 10.92, |
|
"learning_rate": 0.00026000000000000003, |
|
"learning_rate_embeddings": 0.00026000000000000003, |
|
"loss": 2.8764, |
|
"step": 13000 |
|
}, |
|
{ |
|
"epoch": 11.34, |
|
"learning_rate": 0.00027, |
|
"learning_rate_embeddings": 0.00027, |
|
"loss": 2.8521, |
|
"step": 13500 |
|
}, |
|
{ |
|
"epoch": 11.76, |
|
"learning_rate": 0.00028000000000000003, |
|
"learning_rate_embeddings": 0.00028000000000000003, |
|
"loss": 2.8284, |
|
"step": 14000 |
|
}, |
|
{ |
|
"epoch": 12.18, |
|
"learning_rate": 0.00029, |
|
"learning_rate_embeddings": 0.00029, |
|
"loss": 2.8186, |
|
"step": 14500 |
|
}, |
|
{ |
|
"epoch": 12.6, |
|
"learning_rate": 0.0003, |
|
"learning_rate_embeddings": 0.0003, |
|
"loss": 2.7952, |
|
"step": 15000 |
|
}, |
|
{ |
|
"epoch": 13.03, |
|
"learning_rate": 0.00031, |
|
"learning_rate_embeddings": 0.00031, |
|
"loss": 2.7799, |
|
"step": 15500 |
|
}, |
|
{ |
|
"epoch": 13.45, |
|
"learning_rate": 0.00032, |
|
"learning_rate_embeddings": 0.00032, |
|
"loss": 2.7439, |
|
"step": 16000 |
|
}, |
|
{ |
|
"epoch": 13.87, |
|
"learning_rate": 0.00033, |
|
"learning_rate_embeddings": 0.00033, |
|
"loss": 2.7479, |
|
"step": 16500 |
|
}, |
|
{ |
|
"epoch": 14.29, |
|
"learning_rate": 0.00034, |
|
"learning_rate_embeddings": 0.00034, |
|
"loss": 2.7336, |
|
"step": 17000 |
|
}, |
|
{ |
|
"epoch": 14.71, |
|
"learning_rate": 0.00035, |
|
"learning_rate_embeddings": 0.00035, |
|
"loss": 2.7165, |
|
"step": 17500 |
|
}, |
|
{ |
|
"epoch": 15.13, |
|
"learning_rate": 0.00035999999999999997, |
|
"learning_rate_embeddings": 0.00035999999999999997, |
|
"loss": 2.709, |
|
"step": 18000 |
|
}, |
|
{ |
|
"epoch": 15.55, |
|
"learning_rate": 0.00037, |
|
"learning_rate_embeddings": 0.00037, |
|
"loss": 2.6872, |
|
"step": 18500 |
|
}, |
|
{ |
|
"epoch": 15.97, |
|
"learning_rate": 0.00038, |
|
"learning_rate_embeddings": 0.00038, |
|
"loss": 2.6845, |
|
"step": 19000 |
|
}, |
|
{ |
|
"epoch": 16.39, |
|
"learning_rate": 0.00039000000000000005, |
|
"learning_rate_embeddings": 0.00039000000000000005, |
|
"loss": 2.6605, |
|
"step": 19500 |
|
}, |
|
{ |
|
"epoch": 16.81, |
|
"learning_rate": 0.0004, |
|
"learning_rate_embeddings": 0.0004, |
|
"loss": 2.6651, |
|
"step": 20000 |
|
}, |
|
{ |
|
"epoch": 17.23, |
|
"learning_rate": 0.00041, |
|
"learning_rate_embeddings": 0.00041, |
|
"loss": 2.6473, |
|
"step": 20500 |
|
}, |
|
{ |
|
"epoch": 17.65, |
|
"learning_rate": 0.00042, |
|
"learning_rate_embeddings": 0.00042, |
|
"loss": 2.6304, |
|
"step": 21000 |
|
}, |
|
{ |
|
"epoch": 18.07, |
|
"learning_rate": 0.00043, |
|
"learning_rate_embeddings": 0.00043, |
|
"loss": 2.6374, |
|
"step": 21500 |
|
}, |
|
{ |
|
"epoch": 18.49, |
|
"learning_rate": 0.00044, |
|
"learning_rate_embeddings": 0.00044, |
|
"loss": 2.6037, |
|
"step": 22000 |
|
}, |
|
{ |
|
"epoch": 18.91, |
|
"learning_rate": 0.00045000000000000004, |
|
"learning_rate_embeddings": 0.00045000000000000004, |
|
"loss": 2.6156, |
|
"step": 22500 |
|
}, |
|
{ |
|
"epoch": 19.33, |
|
"learning_rate": 0.00046, |
|
"learning_rate_embeddings": 0.00046, |
|
"loss": 2.5906, |
|
"step": 23000 |
|
}, |
|
{ |
|
"epoch": 19.75, |
|
"learning_rate": 0.00047, |
|
"learning_rate_embeddings": 0.00047, |
|
"loss": 2.5834, |
|
"step": 23500 |
|
}, |
|
{ |
|
"epoch": 20.17, |
|
"learning_rate": 0.00048, |
|
"learning_rate_embeddings": 0.00048, |
|
"loss": 2.5827, |
|
"step": 24000 |
|
}, |
|
{ |
|
"epoch": 20.59, |
|
"learning_rate": 0.00049, |
|
"learning_rate_embeddings": 0.00049, |
|
"loss": 2.571, |
|
"step": 24500 |
|
}, |
|
{ |
|
"epoch": 21.01, |
|
"learning_rate": 0.0005, |
|
"learning_rate_embeddings": 0.0005, |
|
"loss": 2.5741, |
|
"step": 25000 |
|
}, |
|
{ |
|
"epoch": 21.01, |
|
"eval_loss": 2.566823959350586, |
|
"eval_runtime": 117.9068, |
|
"eval_samples_per_second": 574.25, |
|
"eval_steps_per_second": 4.487, |
|
"step": 25000 |
|
}, |
|
{ |
|
"epoch": 21.43, |
|
"learning_rate": 0.0004988888888888889, |
|
"learning_rate_embeddings": 0.0004988888888888889, |
|
"loss": 2.5472, |
|
"step": 25500 |
|
}, |
|
{ |
|
"epoch": 21.85, |
|
"learning_rate": 0.0004977777777777778, |
|
"learning_rate_embeddings": 0.0004977777777777778, |
|
"loss": 2.561, |
|
"step": 26000 |
|
}, |
|
{ |
|
"epoch": 22.27, |
|
"learning_rate": 0.0004966666666666666, |
|
"learning_rate_embeddings": 0.0004966666666666666, |
|
"loss": 2.5334, |
|
"step": 26500 |
|
}, |
|
{ |
|
"epoch": 22.69, |
|
"learning_rate": 0.0004955555555555556, |
|
"learning_rate_embeddings": 0.0004955555555555556, |
|
"loss": 2.532, |
|
"step": 27000 |
|
}, |
|
{ |
|
"epoch": 23.11, |
|
"learning_rate": 0.0004944444444444445, |
|
"learning_rate_embeddings": 0.0004944444444444445, |
|
"loss": 2.5293, |
|
"step": 27500 |
|
}, |
|
{ |
|
"epoch": 23.53, |
|
"learning_rate": 0.0004933333333333334, |
|
"learning_rate_embeddings": 0.0004933333333333334, |
|
"loss": 2.5181, |
|
"step": 28000 |
|
}, |
|
{ |
|
"epoch": 23.95, |
|
"learning_rate": 0.0004922222222222222, |
|
"learning_rate_embeddings": 0.0004922222222222222, |
|
"loss": 2.5275, |
|
"step": 28500 |
|
}, |
|
{ |
|
"epoch": 24.37, |
|
"learning_rate": 0.0004911111111111111, |
|
"learning_rate_embeddings": 0.0004911111111111111, |
|
"loss": 2.5083, |
|
"step": 29000 |
|
}, |
|
{ |
|
"epoch": 24.79, |
|
"learning_rate": 0.00049, |
|
"learning_rate_embeddings": 0.00049, |
|
"loss": 2.5055, |
|
"step": 29500 |
|
}, |
|
{ |
|
"epoch": 25.21, |
|
"learning_rate": 0.0004888888888888889, |
|
"learning_rate_embeddings": 0.0004888888888888889, |
|
"loss": 2.4865, |
|
"step": 30000 |
|
}, |
|
{ |
|
"epoch": 25.63, |
|
"learning_rate": 0.0004877777777777778, |
|
"learning_rate_embeddings": 0.0004877777777777778, |
|
"loss": 2.4861, |
|
"step": 30500 |
|
}, |
|
{ |
|
"epoch": 26.05, |
|
"learning_rate": 0.0004866666666666667, |
|
"learning_rate_embeddings": 0.0004866666666666667, |
|
"loss": 2.499, |
|
"step": 31000 |
|
}, |
|
{ |
|
"epoch": 26.47, |
|
"learning_rate": 0.0004855555555555556, |
|
"learning_rate_embeddings": 0.0004855555555555556, |
|
"loss": 2.4629, |
|
"step": 31500 |
|
}, |
|
{ |
|
"epoch": 26.89, |
|
"learning_rate": 0.00048444444444444446, |
|
"learning_rate_embeddings": 0.00048444444444444446, |
|
"loss": 2.4815, |
|
"step": 32000 |
|
}, |
|
{ |
|
"epoch": 27.31, |
|
"learning_rate": 0.00048333333333333334, |
|
"learning_rate_embeddings": 0.00048333333333333334, |
|
"loss": 2.4599, |
|
"step": 32500 |
|
}, |
|
{ |
|
"epoch": 27.73, |
|
"learning_rate": 0.0004822222222222222, |
|
"learning_rate_embeddings": 0.0004822222222222222, |
|
"loss": 2.4602, |
|
"step": 33000 |
|
}, |
|
{ |
|
"epoch": 28.15, |
|
"learning_rate": 0.0004811111111111111, |
|
"learning_rate_embeddings": 0.0004811111111111111, |
|
"loss": 2.469, |
|
"step": 33500 |
|
}, |
|
{ |
|
"epoch": 28.57, |
|
"learning_rate": 0.00048, |
|
"learning_rate_embeddings": 0.00048, |
|
"loss": 2.458, |
|
"step": 34000 |
|
}, |
|
{ |
|
"epoch": 28.99, |
|
"learning_rate": 0.0004788888888888889, |
|
"learning_rate_embeddings": 0.0004788888888888889, |
|
"loss": 2.4497, |
|
"step": 34500 |
|
}, |
|
{ |
|
"epoch": 29.41, |
|
"learning_rate": 0.0004777777777777778, |
|
"learning_rate_embeddings": 0.0004777777777777778, |
|
"loss": 2.4361, |
|
"step": 35000 |
|
}, |
|
{ |
|
"epoch": 29.83, |
|
"learning_rate": 0.0004766666666666667, |
|
"learning_rate_embeddings": 0.0004766666666666667, |
|
"loss": 2.4574, |
|
"step": 35500 |
|
}, |
|
{ |
|
"epoch": 30.25, |
|
"learning_rate": 0.00047555555555555556, |
|
"learning_rate_embeddings": 0.00047555555555555556, |
|
"loss": 2.4342, |
|
"step": 36000 |
|
}, |
|
{ |
|
"epoch": 30.67, |
|
"learning_rate": 0.00047444444444444444, |
|
"learning_rate_embeddings": 0.00047444444444444444, |
|
"loss": 2.4379, |
|
"step": 36500 |
|
}, |
|
{ |
|
"epoch": 31.09, |
|
"learning_rate": 0.00047333333333333336, |
|
"learning_rate_embeddings": 0.00047333333333333336, |
|
"loss": 2.4303, |
|
"step": 37000 |
|
}, |
|
{ |
|
"epoch": 31.51, |
|
"learning_rate": 0.00047222222222222224, |
|
"learning_rate_embeddings": 0.00047222222222222224, |
|
"loss": 2.4203, |
|
"step": 37500 |
|
}, |
|
{ |
|
"epoch": 31.51, |
|
"eval_loss": 2.4701642990112305, |
|
"eval_runtime": 117.9305, |
|
"eval_samples_per_second": 574.135, |
|
"eval_steps_per_second": 4.486, |
|
"step": 37500 |
|
}, |
|
{ |
|
"epoch": 31.93, |
|
"learning_rate": 0.0004711111111111111, |
|
"learning_rate_embeddings": 0.0004711111111111111, |
|
"loss": 2.4321, |
|
"step": 38000 |
|
}, |
|
{ |
|
"epoch": 32.35, |
|
"learning_rate": 0.00047, |
|
"learning_rate_embeddings": 0.00047, |
|
"loss": 2.4119, |
|
"step": 38500 |
|
}, |
|
{ |
|
"epoch": 32.77, |
|
"learning_rate": 0.0004688888888888889, |
|
"learning_rate_embeddings": 0.0004688888888888889, |
|
"loss": 2.4266, |
|
"step": 39000 |
|
}, |
|
{ |
|
"epoch": 33.19, |
|
"learning_rate": 0.0004677777777777778, |
|
"learning_rate_embeddings": 0.0004677777777777778, |
|
"loss": 2.419, |
|
"step": 39500 |
|
}, |
|
{ |
|
"epoch": 33.61, |
|
"learning_rate": 0.00046666666666666666, |
|
"learning_rate_embeddings": 0.00046666666666666666, |
|
"loss": 2.4148, |
|
"step": 40000 |
|
}, |
|
{ |
|
"epoch": 34.03, |
|
"learning_rate": 0.0004655555555555556, |
|
"learning_rate_embeddings": 0.0004655555555555556, |
|
"loss": 2.4137, |
|
"step": 40500 |
|
}, |
|
{ |
|
"epoch": 34.45, |
|
"learning_rate": 0.00046444444444444446, |
|
"learning_rate_embeddings": 0.00046444444444444446, |
|
"loss": 2.4042, |
|
"step": 41000 |
|
}, |
|
{ |
|
"epoch": 34.87, |
|
"learning_rate": 0.00046333333333333334, |
|
"learning_rate_embeddings": 0.00046333333333333334, |
|
"loss": 2.4017, |
|
"step": 41500 |
|
}, |
|
{ |
|
"epoch": 35.29, |
|
"learning_rate": 0.0004622222222222222, |
|
"learning_rate_embeddings": 0.0004622222222222222, |
|
"loss": 2.3965, |
|
"step": 42000 |
|
}, |
|
{ |
|
"epoch": 35.71, |
|
"learning_rate": 0.00046111111111111114, |
|
"learning_rate_embeddings": 0.00046111111111111114, |
|
"loss": 2.3876, |
|
"step": 42500 |
|
}, |
|
{ |
|
"epoch": 36.13, |
|
"learning_rate": 0.00046, |
|
"learning_rate_embeddings": 0.00046, |
|
"loss": 2.3936, |
|
"step": 43000 |
|
}, |
|
{ |
|
"epoch": 36.55, |
|
"learning_rate": 0.0004588888888888889, |
|
"learning_rate_embeddings": 0.0004588888888888889, |
|
"loss": 2.3879, |
|
"step": 43500 |
|
}, |
|
{ |
|
"epoch": 36.97, |
|
"learning_rate": 0.0004577777777777778, |
|
"learning_rate_embeddings": 0.0004577777777777778, |
|
"loss": 2.3967, |
|
"step": 44000 |
|
}, |
|
{ |
|
"epoch": 37.39, |
|
"learning_rate": 0.0004566666666666667, |
|
"learning_rate_embeddings": 0.0004566666666666667, |
|
"loss": 2.374, |
|
"step": 44500 |
|
}, |
|
{ |
|
"epoch": 37.81, |
|
"learning_rate": 0.00045555555555555556, |
|
"learning_rate_embeddings": 0.00045555555555555556, |
|
"loss": 2.3918, |
|
"step": 45000 |
|
}, |
|
{ |
|
"epoch": 38.24, |
|
"learning_rate": 0.00045444444444444444, |
|
"learning_rate_embeddings": 0.00045444444444444444, |
|
"loss": 2.3791, |
|
"step": 45500 |
|
}, |
|
{ |
|
"epoch": 38.66, |
|
"learning_rate": 0.0004533333333333333, |
|
"learning_rate_embeddings": 0.0004533333333333333, |
|
"loss": 2.3799, |
|
"step": 46000 |
|
}, |
|
{ |
|
"epoch": 39.08, |
|
"learning_rate": 0.00045222222222222224, |
|
"learning_rate_embeddings": 0.00045222222222222224, |
|
"loss": 2.3938, |
|
"step": 46500 |
|
}, |
|
{ |
|
"epoch": 39.5, |
|
"learning_rate": 0.0004511111111111111, |
|
"learning_rate_embeddings": 0.0004511111111111111, |
|
"loss": 2.3739, |
|
"step": 47000 |
|
}, |
|
{ |
|
"epoch": 39.92, |
|
"learning_rate": 0.00045000000000000004, |
|
"learning_rate_embeddings": 0.00045000000000000004, |
|
"loss": 2.3757, |
|
"step": 47500 |
|
}, |
|
{ |
|
"epoch": 40.34, |
|
"learning_rate": 0.0004488888888888889, |
|
"learning_rate_embeddings": 0.0004488888888888889, |
|
"loss": 2.3656, |
|
"step": 48000 |
|
}, |
|
{ |
|
"epoch": 40.76, |
|
"learning_rate": 0.0004477777777777778, |
|
"learning_rate_embeddings": 0.0004477777777777778, |
|
"loss": 2.3712, |
|
"step": 48500 |
|
}, |
|
{ |
|
"epoch": 41.18, |
|
"learning_rate": 0.00044666666666666666, |
|
"learning_rate_embeddings": 0.00044666666666666666, |
|
"loss": 2.368, |
|
"step": 49000 |
|
}, |
|
{ |
|
"epoch": 41.6, |
|
"learning_rate": 0.00044555555555555554, |
|
"learning_rate_embeddings": 0.00044555555555555554, |
|
"loss": 2.3585, |
|
"step": 49500 |
|
}, |
|
{ |
|
"epoch": 42.02, |
|
"learning_rate": 0.0004444444444444444, |
|
"learning_rate_embeddings": 0.0004444444444444444, |
|
"loss": 2.3721, |
|
"step": 50000 |
|
}, |
|
{ |
|
"epoch": 42.02, |
|
"eval_loss": 2.4272959232330322, |
|
"eval_runtime": 118.2532, |
|
"eval_samples_per_second": 572.568, |
|
"eval_steps_per_second": 4.473, |
|
"step": 50000 |
|
}, |
|
{ |
|
"epoch": 42.44, |
|
"learning_rate": 0.00044333333333333334, |
|
"learning_rate_embeddings": 0.00044333333333333334, |
|
"loss": 2.3561, |
|
"step": 50500 |
|
}, |
|
{ |
|
"epoch": 42.86, |
|
"learning_rate": 0.00044222222222222227, |
|
"learning_rate_embeddings": 0.00044222222222222227, |
|
"loss": 2.3623, |
|
"step": 51000 |
|
}, |
|
{ |
|
"epoch": 43.28, |
|
"learning_rate": 0.00044111111111111114, |
|
"learning_rate_embeddings": 0.00044111111111111114, |
|
"loss": 2.3465, |
|
"step": 51500 |
|
}, |
|
{ |
|
"epoch": 43.7, |
|
"learning_rate": 0.00044, |
|
"learning_rate_embeddings": 0.00044, |
|
"loss": 2.365, |
|
"step": 52000 |
|
}, |
|
{ |
|
"epoch": 44.12, |
|
"learning_rate": 0.0004388888888888889, |
|
"learning_rate_embeddings": 0.0004388888888888889, |
|
"loss": 2.3568, |
|
"step": 52500 |
|
}, |
|
{ |
|
"epoch": 44.54, |
|
"learning_rate": 0.00043777777777777776, |
|
"learning_rate_embeddings": 0.00043777777777777776, |
|
"loss": 2.3502, |
|
"step": 53000 |
|
}, |
|
{ |
|
"epoch": 44.96, |
|
"learning_rate": 0.00043666666666666664, |
|
"learning_rate_embeddings": 0.00043666666666666664, |
|
"loss": 2.3521, |
|
"step": 53500 |
|
}, |
|
{ |
|
"epoch": 45.38, |
|
"learning_rate": 0.0004355555555555555, |
|
"learning_rate_embeddings": 0.0004355555555555555, |
|
"loss": 2.3435, |
|
"step": 54000 |
|
}, |
|
{ |
|
"epoch": 45.8, |
|
"learning_rate": 0.0004344444444444445, |
|
"learning_rate_embeddings": 0.0004344444444444445, |
|
"loss": 2.345, |
|
"step": 54500 |
|
}, |
|
{ |
|
"epoch": 46.22, |
|
"learning_rate": 0.00043333333333333337, |
|
"learning_rate_embeddings": 0.00043333333333333337, |
|
"loss": 2.3477, |
|
"step": 55000 |
|
}, |
|
{ |
|
"epoch": 46.64, |
|
"learning_rate": 0.00043222222222222224, |
|
"learning_rate_embeddings": 0.00043222222222222224, |
|
"loss": 2.336, |
|
"step": 55500 |
|
}, |
|
{ |
|
"epoch": 47.06, |
|
"learning_rate": 0.0004311111111111111, |
|
"learning_rate_embeddings": 0.0004311111111111111, |
|
"loss": 2.3475, |
|
"step": 56000 |
|
}, |
|
{ |
|
"epoch": 47.48, |
|
"learning_rate": 0.00043, |
|
"learning_rate_embeddings": 0.00043, |
|
"loss": 2.3292, |
|
"step": 56500 |
|
}, |
|
{ |
|
"epoch": 47.9, |
|
"learning_rate": 0.00042888888888888886, |
|
"learning_rate_embeddings": 0.00042888888888888886, |
|
"loss": 2.3491, |
|
"step": 57000 |
|
}, |
|
{ |
|
"epoch": 48.32, |
|
"learning_rate": 0.0004277777777777778, |
|
"learning_rate_embeddings": 0.0004277777777777778, |
|
"loss": 2.341, |
|
"step": 57500 |
|
}, |
|
{ |
|
"epoch": 48.74, |
|
"learning_rate": 0.0004266666666666667, |
|
"learning_rate_embeddings": 0.0004266666666666667, |
|
"loss": 2.3308, |
|
"step": 58000 |
|
}, |
|
{ |
|
"epoch": 49.16, |
|
"learning_rate": 0.0004255555555555556, |
|
"learning_rate_embeddings": 0.0004255555555555556, |
|
"loss": 2.3379, |
|
"step": 58500 |
|
}, |
|
{ |
|
"epoch": 49.58, |
|
"learning_rate": 0.00042444444444444447, |
|
"learning_rate_embeddings": 0.00042444444444444447, |
|
"loss": 2.3315, |
|
"step": 59000 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"learning_rate": 0.00042333333333333334, |
|
"learning_rate_embeddings": 0.00042333333333333334, |
|
"loss": 2.3408, |
|
"step": 59500 |
|
}, |
|
{ |
|
"epoch": 50.42, |
|
"learning_rate": 0.0004222222222222222, |
|
"learning_rate_embeddings": 0.0004222222222222222, |
|
"loss": 2.3248, |
|
"step": 60000 |
|
}, |
|
{ |
|
"epoch": 50.84, |
|
"learning_rate": 0.0004211111111111111, |
|
"learning_rate_embeddings": 0.0004211111111111111, |
|
"loss": 2.3307, |
|
"step": 60500 |
|
}, |
|
{ |
|
"epoch": 51.26, |
|
"learning_rate": 0.00042, |
|
"learning_rate_embeddings": 0.00042, |
|
"loss": 2.3275, |
|
"step": 61000 |
|
}, |
|
{ |
|
"epoch": 51.68, |
|
"learning_rate": 0.0004188888888888889, |
|
"learning_rate_embeddings": 0.0004188888888888889, |
|
"loss": 2.3251, |
|
"step": 61500 |
|
}, |
|
{ |
|
"epoch": 52.1, |
|
"learning_rate": 0.0004177777777777778, |
|
"learning_rate_embeddings": 0.0004177777777777778, |
|
"loss": 2.3249, |
|
"step": 62000 |
|
}, |
|
{ |
|
"epoch": 52.52, |
|
"learning_rate": 0.0004166666666666667, |
|
"learning_rate_embeddings": 0.0004166666666666667, |
|
"loss": 2.3142, |
|
"step": 62500 |
|
}, |
|
{ |
|
"epoch": 52.52, |
|
"eval_loss": 2.400498867034912, |
|
"eval_runtime": 117.9974, |
|
"eval_samples_per_second": 573.809, |
|
"eval_steps_per_second": 4.483, |
|
"step": 62500 |
|
}, |
|
{ |
|
"epoch": 52.94, |
|
"learning_rate": 0.00041555555555555557, |
|
"learning_rate_embeddings": 0.00041555555555555557, |
|
"loss": 2.3245, |
|
"step": 63000 |
|
}, |
|
{ |
|
"epoch": 53.36, |
|
"learning_rate": 0.00041444444444444444, |
|
"learning_rate_embeddings": 0.00041444444444444444, |
|
"loss": 2.3147, |
|
"step": 63500 |
|
}, |
|
{ |
|
"epoch": 53.78, |
|
"learning_rate": 0.0004133333333333333, |
|
"learning_rate_embeddings": 0.0004133333333333333, |
|
"loss": 2.3264, |
|
"step": 64000 |
|
}, |
|
{ |
|
"epoch": 54.2, |
|
"learning_rate": 0.00041222222222222224, |
|
"learning_rate_embeddings": 0.00041222222222222224, |
|
"loss": 2.3162, |
|
"step": 64500 |
|
}, |
|
{ |
|
"epoch": 54.62, |
|
"learning_rate": 0.0004111111111111111, |
|
"learning_rate_embeddings": 0.0004111111111111111, |
|
"loss": 2.3142, |
|
"step": 65000 |
|
}, |
|
{ |
|
"epoch": 55.04, |
|
"learning_rate": 0.00041, |
|
"learning_rate_embeddings": 0.00041, |
|
"loss": 2.3228, |
|
"step": 65500 |
|
}, |
|
{ |
|
"epoch": 55.46, |
|
"learning_rate": 0.0004088888888888889, |
|
"learning_rate_embeddings": 0.0004088888888888889, |
|
"loss": 2.2975, |
|
"step": 66000 |
|
}, |
|
{ |
|
"epoch": 55.88, |
|
"learning_rate": 0.0004077777777777778, |
|
"learning_rate_embeddings": 0.0004077777777777778, |
|
"loss": 2.3202, |
|
"step": 66500 |
|
}, |
|
{ |
|
"epoch": 56.3, |
|
"learning_rate": 0.00040666666666666667, |
|
"learning_rate_embeddings": 0.00040666666666666667, |
|
"loss": 2.3113, |
|
"step": 67000 |
|
}, |
|
{ |
|
"epoch": 56.72, |
|
"learning_rate": 0.00040555555555555554, |
|
"learning_rate_embeddings": 0.00040555555555555554, |
|
"loss": 2.3096, |
|
"step": 67500 |
|
}, |
|
{ |
|
"epoch": 57.14, |
|
"learning_rate": 0.00040444444444444447, |
|
"learning_rate_embeddings": 0.00040444444444444447, |
|
"loss": 2.3102, |
|
"step": 68000 |
|
}, |
|
{ |
|
"epoch": 57.56, |
|
"learning_rate": 0.00040333333333333334, |
|
"learning_rate_embeddings": 0.00040333333333333334, |
|
"loss": 2.2945, |
|
"step": 68500 |
|
}, |
|
{ |
|
"epoch": 57.98, |
|
"learning_rate": 0.0004022222222222222, |
|
"learning_rate_embeddings": 0.0004022222222222222, |
|
"loss": 2.3174, |
|
"step": 69000 |
|
}, |
|
{ |
|
"epoch": 58.4, |
|
"learning_rate": 0.0004011111111111111, |
|
"learning_rate_embeddings": 0.0004011111111111111, |
|
"loss": 2.2979, |
|
"step": 69500 |
|
}, |
|
{ |
|
"epoch": 58.82, |
|
"learning_rate": 0.0004, |
|
"learning_rate_embeddings": 0.0004, |
|
"loss": 2.3039, |
|
"step": 70000 |
|
}, |
|
{ |
|
"epoch": 59.24, |
|
"learning_rate": 0.0003988888888888889, |
|
"learning_rate_embeddings": 0.0003988888888888889, |
|
"loss": 2.3082, |
|
"step": 70500 |
|
}, |
|
{ |
|
"epoch": 59.66, |
|
"learning_rate": 0.00039777777777777777, |
|
"learning_rate_embeddings": 0.00039777777777777777, |
|
"loss": 2.3063, |
|
"step": 71000 |
|
}, |
|
{ |
|
"epoch": 60.08, |
|
"learning_rate": 0.0003966666666666667, |
|
"learning_rate_embeddings": 0.0003966666666666667, |
|
"loss": 2.3047, |
|
"step": 71500 |
|
}, |
|
{ |
|
"epoch": 60.5, |
|
"learning_rate": 0.00039555555555555557, |
|
"learning_rate_embeddings": 0.00039555555555555557, |
|
"loss": 2.2869, |
|
"step": 72000 |
|
}, |
|
{ |
|
"epoch": 60.92, |
|
"learning_rate": 0.00039444444444444444, |
|
"learning_rate_embeddings": 0.00039444444444444444, |
|
"loss": 2.3031, |
|
"step": 72500 |
|
}, |
|
{ |
|
"epoch": 61.34, |
|
"learning_rate": 0.0003933333333333333, |
|
"learning_rate_embeddings": 0.0003933333333333333, |
|
"loss": 2.2872, |
|
"step": 73000 |
|
}, |
|
{ |
|
"epoch": 61.76, |
|
"learning_rate": 0.00039222222222222225, |
|
"learning_rate_embeddings": 0.00039222222222222225, |
|
"loss": 2.2996, |
|
"step": 73500 |
|
}, |
|
{ |
|
"epoch": 62.18, |
|
"learning_rate": 0.0003911111111111111, |
|
"learning_rate_embeddings": 0.0003911111111111111, |
|
"loss": 2.2943, |
|
"step": 74000 |
|
}, |
|
{ |
|
"epoch": 62.6, |
|
"learning_rate": 0.00039000000000000005, |
|
"learning_rate_embeddings": 0.00039000000000000005, |
|
"loss": 2.2937, |
|
"step": 74500 |
|
}, |
|
{ |
|
"epoch": 63.03, |
|
"learning_rate": 0.0003888888888888889, |
|
"learning_rate_embeddings": 0.0003888888888888889, |
|
"loss": 2.3, |
|
"step": 75000 |
|
}, |
|
{ |
|
"epoch": 63.03, |
|
"eval_loss": 2.39066219329834, |
|
"eval_runtime": 117.9428, |
|
"eval_samples_per_second": 574.075, |
|
"eval_steps_per_second": 4.485, |
|
"step": 75000 |
|
}, |
|
{ |
|
"epoch": 63.45, |
|
"learning_rate": 0.0003877777777777778, |
|
"learning_rate_embeddings": 0.0003877777777777778, |
|
"loss": 2.2961, |
|
"step": 75500 |
|
}, |
|
{ |
|
"epoch": 63.87, |
|
"learning_rate": 0.00038666666666666667, |
|
"learning_rate_embeddings": 0.00038666666666666667, |
|
"loss": 2.2994, |
|
"step": 76000 |
|
}, |
|
{ |
|
"epoch": 64.29, |
|
"learning_rate": 0.00038555555555555554, |
|
"learning_rate_embeddings": 0.00038555555555555554, |
|
"loss": 2.2929, |
|
"step": 76500 |
|
}, |
|
{ |
|
"epoch": 64.71, |
|
"learning_rate": 0.0003844444444444444, |
|
"learning_rate_embeddings": 0.0003844444444444444, |
|
"loss": 2.2994, |
|
"step": 77000 |
|
}, |
|
{ |
|
"epoch": 65.13, |
|
"learning_rate": 0.00038333333333333334, |
|
"learning_rate_embeddings": 0.00038333333333333334, |
|
"loss": 2.3068, |
|
"step": 77500 |
|
}, |
|
{ |
|
"epoch": 65.55, |
|
"learning_rate": 0.0003822222222222223, |
|
"learning_rate_embeddings": 0.0003822222222222223, |
|
"loss": 2.2818, |
|
"step": 78000 |
|
}, |
|
{ |
|
"epoch": 65.97, |
|
"learning_rate": 0.00038111111111111115, |
|
"learning_rate_embeddings": 0.00038111111111111115, |
|
"loss": 2.2925, |
|
"step": 78500 |
|
}, |
|
{ |
|
"epoch": 66.39, |
|
"learning_rate": 0.00038, |
|
"learning_rate_embeddings": 0.00038, |
|
"loss": 2.2828, |
|
"step": 79000 |
|
}, |
|
{ |
|
"epoch": 66.81, |
|
"learning_rate": 0.0003788888888888889, |
|
"learning_rate_embeddings": 0.0003788888888888889, |
|
"loss": 2.298, |
|
"step": 79500 |
|
}, |
|
{ |
|
"epoch": 67.23, |
|
"learning_rate": 0.00037777777777777777, |
|
"learning_rate_embeddings": 0.00037777777777777777, |
|
"loss": 2.2866, |
|
"step": 80000 |
|
}, |
|
{ |
|
"epoch": 67.65, |
|
"learning_rate": 0.00037666666666666664, |
|
"learning_rate_embeddings": 0.00037666666666666664, |
|
"loss": 2.2917, |
|
"step": 80500 |
|
}, |
|
{ |
|
"epoch": 68.07, |
|
"learning_rate": 0.0003755555555555555, |
|
"learning_rate_embeddings": 0.0003755555555555555, |
|
"loss": 2.2881, |
|
"step": 81000 |
|
}, |
|
{ |
|
"epoch": 68.49, |
|
"learning_rate": 0.0003744444444444445, |
|
"learning_rate_embeddings": 0.0003744444444444445, |
|
"loss": 2.2837, |
|
"step": 81500 |
|
}, |
|
{ |
|
"epoch": 68.91, |
|
"learning_rate": 0.0003733333333333334, |
|
"learning_rate_embeddings": 0.0003733333333333334, |
|
"loss": 2.2836, |
|
"step": 82000 |
|
}, |
|
{ |
|
"epoch": 69.33, |
|
"learning_rate": 0.00037222222222222225, |
|
"learning_rate_embeddings": 0.00037222222222222225, |
|
"loss": 2.2715, |
|
"step": 82500 |
|
}, |
|
{ |
|
"epoch": 69.75, |
|
"learning_rate": 0.0003711111111111111, |
|
"learning_rate_embeddings": 0.0003711111111111111, |
|
"loss": 2.29, |
|
"step": 83000 |
|
}, |
|
{ |
|
"epoch": 70.17, |
|
"learning_rate": 0.00037, |
|
"learning_rate_embeddings": 0.00037, |
|
"loss": 2.276, |
|
"step": 83500 |
|
}, |
|
{ |
|
"epoch": 70.59, |
|
"learning_rate": 0.00036888888888888887, |
|
"learning_rate_embeddings": 0.00036888888888888887, |
|
"loss": 2.2796, |
|
"step": 84000 |
|
}, |
|
{ |
|
"epoch": 71.01, |
|
"learning_rate": 0.00036777777777777774, |
|
"learning_rate_embeddings": 0.00036777777777777774, |
|
"loss": 2.2864, |
|
"step": 84500 |
|
}, |
|
{ |
|
"epoch": 71.43, |
|
"learning_rate": 0.00036666666666666667, |
|
"learning_rate_embeddings": 0.00036666666666666667, |
|
"loss": 2.2673, |
|
"step": 85000 |
|
}, |
|
{ |
|
"epoch": 71.85, |
|
"learning_rate": 0.0003655555555555556, |
|
"learning_rate_embeddings": 0.0003655555555555556, |
|
"loss": 2.2769, |
|
"step": 85500 |
|
}, |
|
{ |
|
"epoch": 72.27, |
|
"learning_rate": 0.00036444444444444447, |
|
"learning_rate_embeddings": 0.00036444444444444447, |
|
"loss": 2.2738, |
|
"step": 86000 |
|
}, |
|
{ |
|
"epoch": 72.69, |
|
"learning_rate": 0.00036333333333333335, |
|
"learning_rate_embeddings": 0.00036333333333333335, |
|
"loss": 2.273, |
|
"step": 86500 |
|
}, |
|
{ |
|
"epoch": 73.11, |
|
"learning_rate": 0.0003622222222222222, |
|
"learning_rate_embeddings": 0.0003622222222222222, |
|
"loss": 2.2796, |
|
"step": 87000 |
|
}, |
|
{ |
|
"epoch": 73.53, |
|
"learning_rate": 0.0003611111111111111, |
|
"learning_rate_embeddings": 0.0003611111111111111, |
|
"loss": 2.2718, |
|
"step": 87500 |
|
}, |
|
{ |
|
"epoch": 73.53, |
|
"eval_loss": 2.375969409942627, |
|
"eval_runtime": 117.8648, |
|
"eval_samples_per_second": 574.455, |
|
"eval_steps_per_second": 4.488, |
|
"step": 87500 |
|
}, |
|
{ |
|
"epoch": 73.95, |
|
"learning_rate": 0.00035999999999999997, |
|
"learning_rate_embeddings": 0.00035999999999999997, |
|
"loss": 2.2792, |
|
"step": 88000 |
|
}, |
|
{ |
|
"epoch": 74.37, |
|
"learning_rate": 0.0003588888888888889, |
|
"learning_rate_embeddings": 0.0003588888888888889, |
|
"loss": 2.2674, |
|
"step": 88500 |
|
}, |
|
{ |
|
"epoch": 74.79, |
|
"learning_rate": 0.00035777777777777777, |
|
"learning_rate_embeddings": 0.00035777777777777777, |
|
"loss": 2.2713, |
|
"step": 89000 |
|
}, |
|
{ |
|
"epoch": 75.21, |
|
"learning_rate": 0.0003566666666666667, |
|
"learning_rate_embeddings": 0.0003566666666666667, |
|
"loss": 2.2771, |
|
"step": 89500 |
|
}, |
|
{ |
|
"epoch": 75.63, |
|
"learning_rate": 0.00035555555555555557, |
|
"learning_rate_embeddings": 0.00035555555555555557, |
|
"loss": 2.2696, |
|
"step": 90000 |
|
}, |
|
{ |
|
"epoch": 76.05, |
|
"learning_rate": 0.00035444444444444445, |
|
"learning_rate_embeddings": 0.00035444444444444445, |
|
"loss": 2.2767, |
|
"step": 90500 |
|
}, |
|
{ |
|
"epoch": 76.47, |
|
"learning_rate": 0.0003533333333333333, |
|
"learning_rate_embeddings": 0.0003533333333333333, |
|
"loss": 2.2606, |
|
"step": 91000 |
|
}, |
|
{ |
|
"epoch": 76.89, |
|
"learning_rate": 0.00035222222222222225, |
|
"learning_rate_embeddings": 0.00035222222222222225, |
|
"loss": 2.2622, |
|
"step": 91500 |
|
}, |
|
{ |
|
"epoch": 77.31, |
|
"learning_rate": 0.0003511111111111111, |
|
"learning_rate_embeddings": 0.0003511111111111111, |
|
"loss": 2.2661, |
|
"step": 92000 |
|
}, |
|
{ |
|
"epoch": 77.73, |
|
"learning_rate": 0.00035, |
|
"learning_rate_embeddings": 0.00035, |
|
"loss": 2.2712, |
|
"step": 92500 |
|
}, |
|
{ |
|
"epoch": 78.15, |
|
"learning_rate": 0.0003488888888888889, |
|
"learning_rate_embeddings": 0.0003488888888888889, |
|
"loss": 2.2667, |
|
"step": 93000 |
|
}, |
|
{ |
|
"epoch": 78.57, |
|
"learning_rate": 0.0003477777777777778, |
|
"learning_rate_embeddings": 0.0003477777777777778, |
|
"loss": 2.2621, |
|
"step": 93500 |
|
}, |
|
{ |
|
"epoch": 78.99, |
|
"learning_rate": 0.00034666666666666667, |
|
"learning_rate_embeddings": 0.00034666666666666667, |
|
"loss": 2.2717, |
|
"step": 94000 |
|
}, |
|
{ |
|
"epoch": 79.41, |
|
"learning_rate": 0.00034555555555555555, |
|
"learning_rate_embeddings": 0.00034555555555555555, |
|
"loss": 2.2536, |
|
"step": 94500 |
|
}, |
|
{ |
|
"epoch": 79.83, |
|
"learning_rate": 0.0003444444444444445, |
|
"learning_rate_embeddings": 0.0003444444444444445, |
|
"loss": 2.2705, |
|
"step": 95000 |
|
}, |
|
{ |
|
"epoch": 80.25, |
|
"learning_rate": 0.00034333333333333335, |
|
"learning_rate_embeddings": 0.00034333333333333335, |
|
"loss": 2.2659, |
|
"step": 95500 |
|
}, |
|
{ |
|
"epoch": 80.67, |
|
"learning_rate": 0.0003422222222222222, |
|
"learning_rate_embeddings": 0.0003422222222222222, |
|
"loss": 2.2648, |
|
"step": 96000 |
|
}, |
|
{ |
|
"epoch": 81.09, |
|
"learning_rate": 0.0003411111111111111, |
|
"learning_rate_embeddings": 0.0003411111111111111, |
|
"loss": 2.2671, |
|
"step": 96500 |
|
}, |
|
{ |
|
"epoch": 81.51, |
|
"learning_rate": 0.00034, |
|
"learning_rate_embeddings": 0.00034, |
|
"loss": 2.2566, |
|
"step": 97000 |
|
}, |
|
{ |
|
"epoch": 81.93, |
|
"learning_rate": 0.0003388888888888889, |
|
"learning_rate_embeddings": 0.0003388888888888889, |
|
"loss": 2.2726, |
|
"step": 97500 |
|
}, |
|
{ |
|
"epoch": 82.35, |
|
"learning_rate": 0.00033777777777777777, |
|
"learning_rate_embeddings": 0.00033777777777777777, |
|
"loss": 2.2503, |
|
"step": 98000 |
|
}, |
|
{ |
|
"epoch": 82.77, |
|
"learning_rate": 0.0003366666666666667, |
|
"learning_rate_embeddings": 0.0003366666666666667, |
|
"loss": 2.2573, |
|
"step": 98500 |
|
}, |
|
{ |
|
"epoch": 83.19, |
|
"learning_rate": 0.0003355555555555556, |
|
"learning_rate_embeddings": 0.0003355555555555556, |
|
"loss": 2.2644, |
|
"step": 99000 |
|
}, |
|
{ |
|
"epoch": 83.61, |
|
"learning_rate": 0.00033444444444444445, |
|
"learning_rate_embeddings": 0.00033444444444444445, |
|
"loss": 2.2525, |
|
"step": 99500 |
|
}, |
|
{ |
|
"epoch": 84.03, |
|
"learning_rate": 0.0003333333333333333, |
|
"learning_rate_embeddings": 0.0003333333333333333, |
|
"loss": 2.2627, |
|
"step": 100000 |
|
}, |
|
{ |
|
"epoch": 84.03, |
|
"eval_loss": 2.3599398136138916, |
|
"eval_runtime": 117.9754, |
|
"eval_samples_per_second": 573.916, |
|
"eval_steps_per_second": 4.484, |
|
"step": 100000 |
|
}, |
|
{ |
|
"epoch": 84.45, |
|
"learning_rate": 0.0003322222222222222, |
|
"learning_rate_embeddings": 0.0003322222222222222, |
|
"loss": 2.2558, |
|
"step": 100500 |
|
}, |
|
{ |
|
"epoch": 84.87, |
|
"learning_rate": 0.0003311111111111111, |
|
"learning_rate_embeddings": 0.0003311111111111111, |
|
"loss": 2.2678, |
|
"step": 101000 |
|
}, |
|
{ |
|
"epoch": 85.29, |
|
"learning_rate": 0.00033, |
|
"learning_rate_embeddings": 0.00033, |
|
"loss": 2.2504, |
|
"step": 101500 |
|
}, |
|
{ |
|
"epoch": 85.71, |
|
"learning_rate": 0.0003288888888888889, |
|
"learning_rate_embeddings": 0.0003288888888888889, |
|
"loss": 2.2486, |
|
"step": 102000 |
|
}, |
|
{ |
|
"epoch": 86.13, |
|
"learning_rate": 0.0003277777777777778, |
|
"learning_rate_embeddings": 0.0003277777777777778, |
|
"loss": 2.2584, |
|
"step": 102500 |
|
}, |
|
{ |
|
"epoch": 86.55, |
|
"learning_rate": 0.0003266666666666667, |
|
"learning_rate_embeddings": 0.0003266666666666667, |
|
"loss": 2.2436, |
|
"step": 103000 |
|
}, |
|
{ |
|
"epoch": 86.97, |
|
"learning_rate": 0.00032555555555555555, |
|
"learning_rate_embeddings": 0.00032555555555555555, |
|
"loss": 2.2623, |
|
"step": 103500 |
|
}, |
|
{ |
|
"epoch": 87.39, |
|
"learning_rate": 0.0003244444444444444, |
|
"learning_rate_embeddings": 0.0003244444444444444, |
|
"loss": 2.2446, |
|
"step": 104000 |
|
}, |
|
{ |
|
"epoch": 87.81, |
|
"learning_rate": 0.0003233333333333333, |
|
"learning_rate_embeddings": 0.0003233333333333333, |
|
"loss": 2.2468, |
|
"step": 104500 |
|
}, |
|
{ |
|
"epoch": 88.24, |
|
"learning_rate": 0.0003222222222222222, |
|
"learning_rate_embeddings": 0.0003222222222222222, |
|
"loss": 2.2445, |
|
"step": 105000 |
|
}, |
|
{ |
|
"epoch": 88.66, |
|
"learning_rate": 0.00032111111111111115, |
|
"learning_rate_embeddings": 0.00032111111111111115, |
|
"loss": 2.254, |
|
"step": 105500 |
|
}, |
|
{ |
|
"epoch": 89.08, |
|
"learning_rate": 0.00032, |
|
"learning_rate_embeddings": 0.00032, |
|
"loss": 2.264, |
|
"step": 106000 |
|
}, |
|
{ |
|
"epoch": 89.5, |
|
"learning_rate": 0.0003188888888888889, |
|
"learning_rate_embeddings": 0.0003188888888888889, |
|
"loss": 2.2492, |
|
"step": 106500 |
|
}, |
|
{ |
|
"epoch": 89.92, |
|
"learning_rate": 0.0003177777777777778, |
|
"learning_rate_embeddings": 0.0003177777777777778, |
|
"loss": 2.2563, |
|
"step": 107000 |
|
}, |
|
{ |
|
"epoch": 90.34, |
|
"learning_rate": 0.00031666666666666665, |
|
"learning_rate_embeddings": 0.00031666666666666665, |
|
"loss": 2.2282, |
|
"step": 107500 |
|
}, |
|
{ |
|
"epoch": 90.76, |
|
"learning_rate": 0.0003155555555555555, |
|
"learning_rate_embeddings": 0.0003155555555555555, |
|
"loss": 2.2429, |
|
"step": 108000 |
|
}, |
|
{ |
|
"epoch": 91.18, |
|
"learning_rate": 0.0003144444444444445, |
|
"learning_rate_embeddings": 0.0003144444444444445, |
|
"loss": 2.2458, |
|
"step": 108500 |
|
}, |
|
{ |
|
"epoch": 91.6, |
|
"learning_rate": 0.0003133333333333334, |
|
"learning_rate_embeddings": 0.0003133333333333334, |
|
"loss": 2.2512, |
|
"step": 109000 |
|
}, |
|
{ |
|
"epoch": 92.02, |
|
"learning_rate": 0.00031222222222222225, |
|
"learning_rate_embeddings": 0.00031222222222222225, |
|
"loss": 2.2461, |
|
"step": 109500 |
|
}, |
|
{ |
|
"epoch": 92.44, |
|
"learning_rate": 0.0003111111111111111, |
|
"learning_rate_embeddings": 0.0003111111111111111, |
|
"loss": 2.2313, |
|
"step": 110000 |
|
}, |
|
{ |
|
"epoch": 92.86, |
|
"learning_rate": 0.00031, |
|
"learning_rate_embeddings": 0.00031, |
|
"loss": 2.2521, |
|
"step": 110500 |
|
}, |
|
{ |
|
"epoch": 93.28, |
|
"learning_rate": 0.0003088888888888889, |
|
"learning_rate_embeddings": 0.0003088888888888889, |
|
"loss": 2.2322, |
|
"step": 111000 |
|
}, |
|
{ |
|
"epoch": 93.7, |
|
"learning_rate": 0.00030777777777777775, |
|
"learning_rate_embeddings": 0.00030777777777777775, |
|
"loss": 2.2432, |
|
"step": 111500 |
|
}, |
|
{ |
|
"epoch": 94.12, |
|
"learning_rate": 0.0003066666666666667, |
|
"learning_rate_embeddings": 0.0003066666666666667, |
|
"loss": 2.239, |
|
"step": 112000 |
|
}, |
|
{ |
|
"epoch": 94.54, |
|
"learning_rate": 0.0003055555555555556, |
|
"learning_rate_embeddings": 0.0003055555555555556, |
|
"loss": 2.2238, |
|
"step": 112500 |
|
}, |
|
{ |
|
"epoch": 94.54, |
|
"eval_loss": 2.356623411178589, |
|
"eval_runtime": 118.007, |
|
"eval_samples_per_second": 573.763, |
|
"eval_steps_per_second": 4.483, |
|
"step": 112500 |
|
}, |
|
{ |
|
"epoch": 94.96, |
|
"learning_rate": 0.0003044444444444445, |
|
"learning_rate_embeddings": 0.0003044444444444445, |
|
"loss": 2.237, |
|
"step": 113000 |
|
}, |
|
{ |
|
"epoch": 95.38, |
|
"learning_rate": 0.00030333333333333335, |
|
"learning_rate_embeddings": 0.00030333333333333335, |
|
"loss": 2.23, |
|
"step": 113500 |
|
}, |
|
{ |
|
"epoch": 95.8, |
|
"learning_rate": 0.0003022222222222222, |
|
"learning_rate_embeddings": 0.0003022222222222222, |
|
"loss": 2.2383, |
|
"step": 114000 |
|
}, |
|
{ |
|
"epoch": 96.22, |
|
"learning_rate": 0.0003011111111111111, |
|
"learning_rate_embeddings": 0.0003011111111111111, |
|
"loss": 2.234, |
|
"step": 114500 |
|
}, |
|
{ |
|
"epoch": 96.64, |
|
"learning_rate": 0.0003, |
|
"learning_rate_embeddings": 0.0003, |
|
"loss": 2.2285, |
|
"step": 115000 |
|
}, |
|
{ |
|
"epoch": 97.06, |
|
"learning_rate": 0.0002988888888888889, |
|
"learning_rate_embeddings": 0.0002988888888888889, |
|
"loss": 2.2527, |
|
"step": 115500 |
|
}, |
|
{ |
|
"epoch": 97.48, |
|
"learning_rate": 0.0002977777777777778, |
|
"learning_rate_embeddings": 0.0002977777777777778, |
|
"loss": 2.2366, |
|
"step": 116000 |
|
}, |
|
{ |
|
"epoch": 97.9, |
|
"learning_rate": 0.0002966666666666667, |
|
"learning_rate_embeddings": 0.0002966666666666667, |
|
"loss": 2.2437, |
|
"step": 116500 |
|
}, |
|
{ |
|
"epoch": 98.32, |
|
"learning_rate": 0.0002955555555555556, |
|
"learning_rate_embeddings": 0.0002955555555555556, |
|
"loss": 2.2262, |
|
"step": 117000 |
|
}, |
|
{ |
|
"epoch": 98.74, |
|
"learning_rate": 0.00029444444444444445, |
|
"learning_rate_embeddings": 0.00029444444444444445, |
|
"loss": 2.235, |
|
"step": 117500 |
|
}, |
|
{ |
|
"epoch": 99.16, |
|
"learning_rate": 0.0002933333333333333, |
|
"learning_rate_embeddings": 0.0002933333333333333, |
|
"loss": 2.2463, |
|
"step": 118000 |
|
}, |
|
{ |
|
"epoch": 99.58, |
|
"learning_rate": 0.0002922222222222222, |
|
"learning_rate_embeddings": 0.0002922222222222222, |
|
"loss": 2.2273, |
|
"step": 118500 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"learning_rate": 0.00029111111111111113, |
|
"learning_rate_embeddings": 0.00029111111111111113, |
|
"loss": 2.2438, |
|
"step": 119000 |
|
}, |
|
{ |
|
"epoch": 100.42, |
|
"learning_rate": 0.00029, |
|
"learning_rate_embeddings": 0.00029, |
|
"loss": 2.2216, |
|
"step": 119500 |
|
}, |
|
{ |
|
"epoch": 100.84, |
|
"learning_rate": 0.0002888888888888889, |
|
"learning_rate_embeddings": 0.0002888888888888889, |
|
"loss": 2.2296, |
|
"step": 120000 |
|
}, |
|
{ |
|
"epoch": 101.26, |
|
"learning_rate": 0.0002877777777777778, |
|
"learning_rate_embeddings": 0.0002877777777777778, |
|
"loss": 2.2246, |
|
"step": 120500 |
|
}, |
|
{ |
|
"epoch": 101.68, |
|
"learning_rate": 0.0002866666666666667, |
|
"learning_rate_embeddings": 0.0002866666666666667, |
|
"loss": 2.2296, |
|
"step": 121000 |
|
}, |
|
{ |
|
"epoch": 102.1, |
|
"learning_rate": 0.00028555555555555555, |
|
"learning_rate_embeddings": 0.00028555555555555555, |
|
"loss": 2.2296, |
|
"step": 121500 |
|
}, |
|
{ |
|
"epoch": 102.52, |
|
"learning_rate": 0.0002844444444444444, |
|
"learning_rate_embeddings": 0.0002844444444444444, |
|
"loss": 2.238, |
|
"step": 122000 |
|
}, |
|
{ |
|
"epoch": 102.94, |
|
"learning_rate": 0.00028333333333333335, |
|
"learning_rate_embeddings": 0.00028333333333333335, |
|
"loss": 2.2346, |
|
"step": 122500 |
|
}, |
|
{ |
|
"epoch": 103.36, |
|
"learning_rate": 0.00028222222222222223, |
|
"learning_rate_embeddings": 0.00028222222222222223, |
|
"loss": 2.2188, |
|
"step": 123000 |
|
}, |
|
{ |
|
"epoch": 103.78, |
|
"learning_rate": 0.0002811111111111111, |
|
"learning_rate_embeddings": 0.0002811111111111111, |
|
"loss": 2.2319, |
|
"step": 123500 |
|
}, |
|
{ |
|
"epoch": 104.2, |
|
"learning_rate": 0.00028000000000000003, |
|
"learning_rate_embeddings": 0.00028000000000000003, |
|
"loss": 2.2312, |
|
"step": 124000 |
|
}, |
|
{ |
|
"epoch": 104.62, |
|
"learning_rate": 0.0002788888888888889, |
|
"learning_rate_embeddings": 0.0002788888888888889, |
|
"loss": 2.2273, |
|
"step": 124500 |
|
}, |
|
{ |
|
"epoch": 105.04, |
|
"learning_rate": 0.0002777777777777778, |
|
"learning_rate_embeddings": 0.0002777777777777778, |
|
"loss": 2.2271, |
|
"step": 125000 |
|
}, |
|
{ |
|
"epoch": 105.04, |
|
"eval_loss": 2.353121042251587, |
|
"eval_runtime": 118.053, |
|
"eval_samples_per_second": 573.539, |
|
"eval_steps_per_second": 4.481, |
|
"step": 125000 |
|
}, |
|
{ |
|
"epoch": 105.46, |
|
"learning_rate": 0.00027666666666666665, |
|
"learning_rate_embeddings": 0.00027666666666666665, |
|
"loss": 2.2206, |
|
"step": 125500 |
|
}, |
|
{ |
|
"epoch": 105.88, |
|
"learning_rate": 0.0002755555555555556, |
|
"learning_rate_embeddings": 0.0002755555555555556, |
|
"loss": 2.2287, |
|
"step": 126000 |
|
}, |
|
{ |
|
"epoch": 106.3, |
|
"learning_rate": 0.00027444444444444445, |
|
"learning_rate_embeddings": 0.00027444444444444445, |
|
"loss": 2.2179, |
|
"step": 126500 |
|
}, |
|
{ |
|
"epoch": 106.72, |
|
"learning_rate": 0.00027333333333333333, |
|
"learning_rate_embeddings": 0.00027333333333333333, |
|
"loss": 2.2206, |
|
"step": 127000 |
|
}, |
|
{ |
|
"epoch": 107.14, |
|
"learning_rate": 0.0002722222222222222, |
|
"learning_rate_embeddings": 0.0002722222222222222, |
|
"loss": 2.2271, |
|
"step": 127500 |
|
}, |
|
{ |
|
"epoch": 107.56, |
|
"learning_rate": 0.00027111111111111113, |
|
"learning_rate_embeddings": 0.00027111111111111113, |
|
"loss": 2.2219, |
|
"step": 128000 |
|
}, |
|
{ |
|
"epoch": 107.98, |
|
"learning_rate": 0.00027, |
|
"learning_rate_embeddings": 0.00027, |
|
"loss": 2.2333, |
|
"step": 128500 |
|
}, |
|
{ |
|
"epoch": 108.4, |
|
"learning_rate": 0.00026888888888888893, |
|
"learning_rate_embeddings": 0.00026888888888888893, |
|
"loss": 2.218, |
|
"step": 129000 |
|
}, |
|
{ |
|
"epoch": 108.82, |
|
"learning_rate": 0.0002677777777777778, |
|
"learning_rate_embeddings": 0.0002677777777777778, |
|
"loss": 2.2332, |
|
"step": 129500 |
|
}, |
|
{ |
|
"epoch": 109.24, |
|
"learning_rate": 0.0002666666666666667, |
|
"learning_rate_embeddings": 0.0002666666666666667, |
|
"loss": 2.2199, |
|
"step": 130000 |
|
}, |
|
{ |
|
"epoch": 109.66, |
|
"learning_rate": 0.00026555555555555555, |
|
"learning_rate_embeddings": 0.00026555555555555555, |
|
"loss": 2.2111, |
|
"step": 130500 |
|
}, |
|
{ |
|
"epoch": 110.08, |
|
"learning_rate": 0.00026444444444444443, |
|
"learning_rate_embeddings": 0.00026444444444444443, |
|
"loss": 2.2294, |
|
"step": 131000 |
|
}, |
|
{ |
|
"epoch": 110.5, |
|
"learning_rate": 0.0002633333333333333, |
|
"learning_rate_embeddings": 0.0002633333333333333, |
|
"loss": 2.2118, |
|
"step": 131500 |
|
}, |
|
{ |
|
"epoch": 110.92, |
|
"learning_rate": 0.00026222222222222223, |
|
"learning_rate_embeddings": 0.00026222222222222223, |
|
"loss": 2.2269, |
|
"step": 132000 |
|
}, |
|
{ |
|
"epoch": 111.34, |
|
"learning_rate": 0.00026111111111111116, |
|
"learning_rate_embeddings": 0.00026111111111111116, |
|
"loss": 2.2142, |
|
"step": 132500 |
|
}, |
|
{ |
|
"epoch": 111.76, |
|
"learning_rate": 0.00026000000000000003, |
|
"learning_rate_embeddings": 0.00026000000000000003, |
|
"loss": 2.2195, |
|
"step": 133000 |
|
}, |
|
{ |
|
"epoch": 112.18, |
|
"learning_rate": 0.0002588888888888889, |
|
"learning_rate_embeddings": 0.0002588888888888889, |
|
"loss": 2.2152, |
|
"step": 133500 |
|
}, |
|
{ |
|
"epoch": 112.6, |
|
"learning_rate": 0.0002577777777777778, |
|
"learning_rate_embeddings": 0.0002577777777777778, |
|
"loss": 2.2244, |
|
"step": 134000 |
|
}, |
|
{ |
|
"epoch": 113.03, |
|
"learning_rate": 0.00025666666666666665, |
|
"learning_rate_embeddings": 0.00025666666666666665, |
|
"loss": 2.2308, |
|
"step": 134500 |
|
}, |
|
{ |
|
"epoch": 113.45, |
|
"learning_rate": 0.00025555555555555553, |
|
"learning_rate_embeddings": 0.00025555555555555553, |
|
"loss": 2.2138, |
|
"step": 135000 |
|
}, |
|
{ |
|
"epoch": 113.87, |
|
"learning_rate": 0.0002544444444444444, |
|
"learning_rate_embeddings": 0.0002544444444444444, |
|
"loss": 2.2158, |
|
"step": 135500 |
|
}, |
|
{ |
|
"epoch": 114.29, |
|
"learning_rate": 0.0002533333333333334, |
|
"learning_rate_embeddings": 0.0002533333333333334, |
|
"loss": 2.2147, |
|
"step": 136000 |
|
}, |
|
{ |
|
"epoch": 114.71, |
|
"learning_rate": 0.00025222222222222226, |
|
"learning_rate_embeddings": 0.00025222222222222226, |
|
"loss": 2.2134, |
|
"step": 136500 |
|
}, |
|
{ |
|
"epoch": 115.13, |
|
"learning_rate": 0.00025111111111111113, |
|
"learning_rate_embeddings": 0.00025111111111111113, |
|
"loss": 2.2084, |
|
"step": 137000 |
|
}, |
|
{ |
|
"epoch": 115.55, |
|
"learning_rate": 0.00025, |
|
"learning_rate_embeddings": 0.00025, |
|
"loss": 2.2041, |
|
"step": 137500 |
|
}, |
|
{ |
|
"epoch": 115.55, |
|
"eval_loss": 2.3373777866363525, |
|
"eval_runtime": 117.8501, |
|
"eval_samples_per_second": 574.527, |
|
"eval_steps_per_second": 4.489, |
|
"step": 137500 |
|
}, |
|
{ |
|
"epoch": 115.97, |
|
"learning_rate": 0.0002488888888888889, |
|
"learning_rate_embeddings": 0.0002488888888888889, |
|
"loss": 2.226, |
|
"step": 138000 |
|
}, |
|
{ |
|
"epoch": 116.39, |
|
"learning_rate": 0.0002477777777777778, |
|
"learning_rate_embeddings": 0.0002477777777777778, |
|
"loss": 2.1967, |
|
"step": 138500 |
|
}, |
|
{ |
|
"epoch": 116.81, |
|
"learning_rate": 0.0002466666666666667, |
|
"learning_rate_embeddings": 0.0002466666666666667, |
|
"loss": 2.2217, |
|
"step": 139000 |
|
}, |
|
{ |
|
"epoch": 117.23, |
|
"learning_rate": 0.00024555555555555556, |
|
"learning_rate_embeddings": 0.00024555555555555556, |
|
"loss": 2.2117, |
|
"step": 139500 |
|
}, |
|
{ |
|
"epoch": 117.65, |
|
"learning_rate": 0.00024444444444444443, |
|
"learning_rate_embeddings": 0.00024444444444444443, |
|
"loss": 2.2164, |
|
"step": 140000 |
|
}, |
|
{ |
|
"epoch": 118.07, |
|
"learning_rate": 0.00024333333333333336, |
|
"learning_rate_embeddings": 0.00024333333333333336, |
|
"loss": 2.2186, |
|
"step": 140500 |
|
}, |
|
{ |
|
"epoch": 118.49, |
|
"learning_rate": 0.00024222222222222223, |
|
"learning_rate_embeddings": 0.00024222222222222223, |
|
"loss": 2.1994, |
|
"step": 141000 |
|
}, |
|
{ |
|
"epoch": 118.91, |
|
"learning_rate": 0.0002411111111111111, |
|
"learning_rate_embeddings": 0.0002411111111111111, |
|
"loss": 2.2184, |
|
"step": 141500 |
|
}, |
|
{ |
|
"epoch": 119.33, |
|
"learning_rate": 0.00024, |
|
"learning_rate_embeddings": 0.00024, |
|
"loss": 2.2062, |
|
"step": 142000 |
|
}, |
|
{ |
|
"epoch": 119.75, |
|
"learning_rate": 0.0002388888888888889, |
|
"learning_rate_embeddings": 0.0002388888888888889, |
|
"loss": 2.216, |
|
"step": 142500 |
|
}, |
|
{ |
|
"epoch": 120.17, |
|
"learning_rate": 0.00023777777777777778, |
|
"learning_rate_embeddings": 0.00023777777777777778, |
|
"loss": 2.21, |
|
"step": 143000 |
|
}, |
|
{ |
|
"epoch": 120.59, |
|
"learning_rate": 0.00023666666666666668, |
|
"learning_rate_embeddings": 0.00023666666666666668, |
|
"loss": 2.2112, |
|
"step": 143500 |
|
}, |
|
{ |
|
"epoch": 121.01, |
|
"learning_rate": 0.00023555555555555556, |
|
"learning_rate_embeddings": 0.00023555555555555556, |
|
"loss": 2.2096, |
|
"step": 144000 |
|
}, |
|
{ |
|
"epoch": 121.43, |
|
"learning_rate": 0.00023444444444444446, |
|
"learning_rate_embeddings": 0.00023444444444444446, |
|
"loss": 2.2044, |
|
"step": 144500 |
|
}, |
|
{ |
|
"epoch": 121.85, |
|
"learning_rate": 0.00023333333333333333, |
|
"learning_rate_embeddings": 0.00023333333333333333, |
|
"loss": 2.2158, |
|
"step": 145000 |
|
}, |
|
{ |
|
"epoch": 122.27, |
|
"learning_rate": 0.00023222222222222223, |
|
"learning_rate_embeddings": 0.00023222222222222223, |
|
"loss": 2.2093, |
|
"step": 145500 |
|
}, |
|
{ |
|
"epoch": 122.69, |
|
"learning_rate": 0.0002311111111111111, |
|
"learning_rate_embeddings": 0.0002311111111111111, |
|
"loss": 2.2058, |
|
"step": 146000 |
|
}, |
|
{ |
|
"epoch": 123.11, |
|
"learning_rate": 0.00023, |
|
"learning_rate_embeddings": 0.00023, |
|
"loss": 2.2102, |
|
"step": 146500 |
|
}, |
|
{ |
|
"epoch": 123.53, |
|
"learning_rate": 0.0002288888888888889, |
|
"learning_rate_embeddings": 0.0002288888888888889, |
|
"loss": 2.1978, |
|
"step": 147000 |
|
}, |
|
{ |
|
"epoch": 123.95, |
|
"learning_rate": 0.00022777777777777778, |
|
"learning_rate_embeddings": 0.00022777777777777778, |
|
"loss": 2.2105, |
|
"step": 147500 |
|
}, |
|
{ |
|
"epoch": 124.37, |
|
"learning_rate": 0.00022666666666666666, |
|
"learning_rate_embeddings": 0.00022666666666666666, |
|
"loss": 2.1972, |
|
"step": 148000 |
|
}, |
|
{ |
|
"epoch": 124.79, |
|
"learning_rate": 0.00022555555555555556, |
|
"learning_rate_embeddings": 0.00022555555555555556, |
|
"loss": 2.2071, |
|
"step": 148500 |
|
}, |
|
{ |
|
"epoch": 125.21, |
|
"learning_rate": 0.00022444444444444446, |
|
"learning_rate_embeddings": 0.00022444444444444446, |
|
"loss": 2.2144, |
|
"step": 149000 |
|
}, |
|
{ |
|
"epoch": 125.63, |
|
"learning_rate": 0.00022333333333333333, |
|
"learning_rate_embeddings": 0.00022333333333333333, |
|
"loss": 2.2037, |
|
"step": 149500 |
|
}, |
|
{ |
|
"epoch": 126.05, |
|
"learning_rate": 0.0002222222222222222, |
|
"learning_rate_embeddings": 0.0002222222222222222, |
|
"loss": 2.206, |
|
"step": 150000 |
|
}, |
|
{ |
|
"epoch": 126.05, |
|
"eval_loss": 2.3389413356781006, |
|
"eval_runtime": 117.7332, |
|
"eval_samples_per_second": 575.097, |
|
"eval_steps_per_second": 4.493, |
|
"step": 150000 |
|
}, |
|
{ |
|
"epoch": 126.47, |
|
"learning_rate": 0.00022111111111111113, |
|
"learning_rate_embeddings": 0.00022111111111111113, |
|
"loss": 2.1956, |
|
"step": 150500 |
|
}, |
|
{ |
|
"epoch": 126.89, |
|
"learning_rate": 0.00022, |
|
"learning_rate_embeddings": 0.00022, |
|
"loss": 2.2087, |
|
"step": 151000 |
|
}, |
|
{ |
|
"epoch": 127.31, |
|
"learning_rate": 0.00021888888888888888, |
|
"learning_rate_embeddings": 0.00021888888888888888, |
|
"loss": 2.1942, |
|
"step": 151500 |
|
}, |
|
{ |
|
"epoch": 127.73, |
|
"learning_rate": 0.00021777777777777776, |
|
"learning_rate_embeddings": 0.00021777777777777776, |
|
"loss": 2.206, |
|
"step": 152000 |
|
}, |
|
{ |
|
"epoch": 128.15, |
|
"learning_rate": 0.00021666666666666668, |
|
"learning_rate_embeddings": 0.00021666666666666668, |
|
"loss": 2.2029, |
|
"step": 152500 |
|
}, |
|
{ |
|
"epoch": 128.57, |
|
"learning_rate": 0.00021555555555555556, |
|
"learning_rate_embeddings": 0.00021555555555555556, |
|
"loss": 2.1988, |
|
"step": 153000 |
|
}, |
|
{ |
|
"epoch": 128.99, |
|
"learning_rate": 0.00021444444444444443, |
|
"learning_rate_embeddings": 0.00021444444444444443, |
|
"loss": 2.2121, |
|
"step": 153500 |
|
}, |
|
{ |
|
"epoch": 129.41, |
|
"learning_rate": 0.00021333333333333336, |
|
"learning_rate_embeddings": 0.00021333333333333336, |
|
"loss": 2.1833, |
|
"step": 154000 |
|
}, |
|
{ |
|
"epoch": 129.83, |
|
"learning_rate": 0.00021222222222222223, |
|
"learning_rate_embeddings": 0.00021222222222222223, |
|
"loss": 2.2016, |
|
"step": 154500 |
|
}, |
|
{ |
|
"epoch": 130.25, |
|
"learning_rate": 0.0002111111111111111, |
|
"learning_rate_embeddings": 0.0002111111111111111, |
|
"loss": 2.1974, |
|
"step": 155000 |
|
}, |
|
{ |
|
"epoch": 130.67, |
|
"learning_rate": 0.00021, |
|
"learning_rate_embeddings": 0.00021, |
|
"loss": 2.1969, |
|
"step": 155500 |
|
}, |
|
{ |
|
"epoch": 131.09, |
|
"learning_rate": 0.0002088888888888889, |
|
"learning_rate_embeddings": 0.0002088888888888889, |
|
"loss": 2.2003, |
|
"step": 156000 |
|
}, |
|
{ |
|
"epoch": 131.51, |
|
"learning_rate": 0.00020777777777777778, |
|
"learning_rate_embeddings": 0.00020777777777777778, |
|
"loss": 2.1987, |
|
"step": 156500 |
|
}, |
|
{ |
|
"epoch": 131.93, |
|
"learning_rate": 0.00020666666666666666, |
|
"learning_rate_embeddings": 0.00020666666666666666, |
|
"loss": 2.2038, |
|
"step": 157000 |
|
}, |
|
{ |
|
"epoch": 132.35, |
|
"learning_rate": 0.00020555555555555556, |
|
"learning_rate_embeddings": 0.00020555555555555556, |
|
"loss": 2.189, |
|
"step": 157500 |
|
}, |
|
{ |
|
"epoch": 132.77, |
|
"learning_rate": 0.00020444444444444446, |
|
"learning_rate_embeddings": 0.00020444444444444446, |
|
"loss": 2.2054, |
|
"step": 158000 |
|
}, |
|
{ |
|
"epoch": 133.19, |
|
"learning_rate": 0.00020333333333333333, |
|
"learning_rate_embeddings": 0.00020333333333333333, |
|
"loss": 2.197, |
|
"step": 158500 |
|
}, |
|
{ |
|
"epoch": 133.61, |
|
"learning_rate": 0.00020222222222222223, |
|
"learning_rate_embeddings": 0.00020222222222222223, |
|
"loss": 2.1855, |
|
"step": 159000 |
|
}, |
|
{ |
|
"epoch": 134.03, |
|
"learning_rate": 0.0002011111111111111, |
|
"learning_rate_embeddings": 0.0002011111111111111, |
|
"loss": 2.2069, |
|
"step": 159500 |
|
}, |
|
{ |
|
"epoch": 134.45, |
|
"learning_rate": 0.0002, |
|
"learning_rate_embeddings": 0.0002, |
|
"loss": 2.1969, |
|
"step": 160000 |
|
}, |
|
{ |
|
"epoch": 134.87, |
|
"learning_rate": 0.00019888888888888888, |
|
"learning_rate_embeddings": 0.00019888888888888888, |
|
"loss": 2.1952, |
|
"step": 160500 |
|
}, |
|
{ |
|
"epoch": 135.29, |
|
"learning_rate": 0.00019777777777777778, |
|
"learning_rate_embeddings": 0.00019777777777777778, |
|
"loss": 2.2002, |
|
"step": 161000 |
|
}, |
|
{ |
|
"epoch": 135.71, |
|
"learning_rate": 0.00019666666666666666, |
|
"learning_rate_embeddings": 0.00019666666666666666, |
|
"loss": 2.1968, |
|
"step": 161500 |
|
}, |
|
{ |
|
"epoch": 136.13, |
|
"learning_rate": 0.00019555555555555556, |
|
"learning_rate_embeddings": 0.00019555555555555556, |
|
"loss": 2.2008, |
|
"step": 162000 |
|
}, |
|
{ |
|
"epoch": 136.55, |
|
"learning_rate": 0.00019444444444444446, |
|
"learning_rate_embeddings": 0.00019444444444444446, |
|
"loss": 2.1835, |
|
"step": 162500 |
|
}, |
|
{ |
|
"epoch": 136.55, |
|
"eval_loss": 2.3406405448913574, |
|
"eval_runtime": 118.0375, |
|
"eval_samples_per_second": 573.614, |
|
"eval_steps_per_second": 4.482, |
|
"step": 162500 |
|
}, |
|
{ |
|
"epoch": 136.97, |
|
"learning_rate": 0.00019333333333333333, |
|
"learning_rate_embeddings": 0.00019333333333333333, |
|
"loss": 2.2014, |
|
"step": 163000 |
|
}, |
|
{ |
|
"epoch": 137.39, |
|
"learning_rate": 0.0001922222222222222, |
|
"learning_rate_embeddings": 0.0001922222222222222, |
|
"loss": 2.1768, |
|
"step": 163500 |
|
}, |
|
{ |
|
"epoch": 137.81, |
|
"learning_rate": 0.00019111111111111114, |
|
"learning_rate_embeddings": 0.00019111111111111114, |
|
"loss": 2.19, |
|
"step": 164000 |
|
}, |
|
{ |
|
"epoch": 138.24, |
|
"learning_rate": 0.00019, |
|
"learning_rate_embeddings": 0.00019, |
|
"loss": 2.185, |
|
"step": 164500 |
|
}, |
|
{ |
|
"epoch": 138.66, |
|
"learning_rate": 0.00018888888888888888, |
|
"learning_rate_embeddings": 0.00018888888888888888, |
|
"loss": 2.1859, |
|
"step": 165000 |
|
}, |
|
{ |
|
"epoch": 139.08, |
|
"learning_rate": 0.00018777777777777776, |
|
"learning_rate_embeddings": 0.00018777777777777776, |
|
"loss": 2.1926, |
|
"step": 165500 |
|
}, |
|
{ |
|
"epoch": 139.5, |
|
"learning_rate": 0.0001866666666666667, |
|
"learning_rate_embeddings": 0.0001866666666666667, |
|
"loss": 2.1814, |
|
"step": 166000 |
|
}, |
|
{ |
|
"epoch": 139.92, |
|
"learning_rate": 0.00018555555555555556, |
|
"learning_rate_embeddings": 0.00018555555555555556, |
|
"loss": 2.199, |
|
"step": 166500 |
|
}, |
|
{ |
|
"epoch": 140.34, |
|
"learning_rate": 0.00018444444444444443, |
|
"learning_rate_embeddings": 0.00018444444444444443, |
|
"loss": 2.1957, |
|
"step": 167000 |
|
}, |
|
{ |
|
"epoch": 140.76, |
|
"learning_rate": 0.00018333333333333334, |
|
"learning_rate_embeddings": 0.00018333333333333334, |
|
"loss": 2.1959, |
|
"step": 167500 |
|
}, |
|
{ |
|
"epoch": 141.18, |
|
"learning_rate": 0.00018222222222222224, |
|
"learning_rate_embeddings": 0.00018222222222222224, |
|
"loss": 2.1904, |
|
"step": 168000 |
|
}, |
|
{ |
|
"epoch": 141.6, |
|
"learning_rate": 0.0001811111111111111, |
|
"learning_rate_embeddings": 0.0001811111111111111, |
|
"loss": 2.1863, |
|
"step": 168500 |
|
}, |
|
{ |
|
"epoch": 142.02, |
|
"learning_rate": 0.00017999999999999998, |
|
"learning_rate_embeddings": 0.00017999999999999998, |
|
"loss": 2.1972, |
|
"step": 169000 |
|
}, |
|
{ |
|
"epoch": 142.44, |
|
"learning_rate": 0.00017888888888888889, |
|
"learning_rate_embeddings": 0.00017888888888888889, |
|
"loss": 2.1745, |
|
"step": 169500 |
|
}, |
|
{ |
|
"epoch": 142.86, |
|
"learning_rate": 0.00017777777777777779, |
|
"learning_rate_embeddings": 0.00017777777777777779, |
|
"loss": 2.1828, |
|
"step": 170000 |
|
}, |
|
{ |
|
"epoch": 143.28, |
|
"learning_rate": 0.00017666666666666666, |
|
"learning_rate_embeddings": 0.00017666666666666666, |
|
"loss": 2.1897, |
|
"step": 170500 |
|
}, |
|
{ |
|
"epoch": 143.7, |
|
"learning_rate": 0.00017555555555555556, |
|
"learning_rate_embeddings": 0.00017555555555555556, |
|
"loss": 2.1803, |
|
"step": 171000 |
|
}, |
|
{ |
|
"epoch": 144.12, |
|
"learning_rate": 0.00017444444444444446, |
|
"learning_rate_embeddings": 0.00017444444444444446, |
|
"loss": 2.1868, |
|
"step": 171500 |
|
}, |
|
{ |
|
"epoch": 144.54, |
|
"learning_rate": 0.00017333333333333334, |
|
"learning_rate_embeddings": 0.00017333333333333334, |
|
"loss": 2.1727, |
|
"step": 172000 |
|
}, |
|
{ |
|
"epoch": 144.96, |
|
"learning_rate": 0.00017222222222222224, |
|
"learning_rate_embeddings": 0.00017222222222222224, |
|
"loss": 2.1928, |
|
"step": 172500 |
|
}, |
|
{ |
|
"epoch": 145.38, |
|
"learning_rate": 0.0001711111111111111, |
|
"learning_rate_embeddings": 0.0001711111111111111, |
|
"loss": 2.1804, |
|
"step": 173000 |
|
}, |
|
{ |
|
"epoch": 145.8, |
|
"learning_rate": 0.00017, |
|
"learning_rate_embeddings": 0.00017, |
|
"loss": 2.1848, |
|
"step": 173500 |
|
}, |
|
{ |
|
"epoch": 146.22, |
|
"learning_rate": 0.00016888888888888889, |
|
"learning_rate_embeddings": 0.00016888888888888889, |
|
"loss": 2.1702, |
|
"step": 174000 |
|
}, |
|
{ |
|
"epoch": 146.64, |
|
"learning_rate": 0.0001677777777777778, |
|
"learning_rate_embeddings": 0.0001677777777777778, |
|
"loss": 2.178, |
|
"step": 174500 |
|
}, |
|
{ |
|
"epoch": 147.06, |
|
"learning_rate": 0.00016666666666666666, |
|
"learning_rate_embeddings": 0.00016666666666666666, |
|
"loss": 2.1891, |
|
"step": 175000 |
|
}, |
|
{ |
|
"epoch": 147.06, |
|
"eval_loss": 2.336824655532837, |
|
"eval_runtime": 117.8332, |
|
"eval_samples_per_second": 574.609, |
|
"eval_steps_per_second": 4.489, |
|
"step": 175000 |
|
}, |
|
{ |
|
"epoch": 147.48, |
|
"learning_rate": 0.00016555555555555556, |
|
"learning_rate_embeddings": 0.00016555555555555556, |
|
"loss": 2.1814, |
|
"step": 175500 |
|
}, |
|
{ |
|
"epoch": 147.9, |
|
"learning_rate": 0.00016444444444444446, |
|
"learning_rate_embeddings": 0.00016444444444444446, |
|
"loss": 2.1889, |
|
"step": 176000 |
|
}, |
|
{ |
|
"epoch": 148.32, |
|
"learning_rate": 0.00016333333333333334, |
|
"learning_rate_embeddings": 0.00016333333333333334, |
|
"loss": 2.1768, |
|
"step": 176500 |
|
}, |
|
{ |
|
"epoch": 148.74, |
|
"learning_rate": 0.0001622222222222222, |
|
"learning_rate_embeddings": 0.0001622222222222222, |
|
"loss": 2.1798, |
|
"step": 177000 |
|
}, |
|
{ |
|
"epoch": 149.16, |
|
"learning_rate": 0.0001611111111111111, |
|
"learning_rate_embeddings": 0.0001611111111111111, |
|
"loss": 2.1849, |
|
"step": 177500 |
|
}, |
|
{ |
|
"epoch": 149.58, |
|
"learning_rate": 0.00016, |
|
"learning_rate_embeddings": 0.00016, |
|
"loss": 2.1773, |
|
"step": 178000 |
|
}, |
|
{ |
|
"epoch": 150.0, |
|
"learning_rate": 0.0001588888888888889, |
|
"learning_rate_embeddings": 0.0001588888888888889, |
|
"loss": 2.1832, |
|
"step": 178500 |
|
}, |
|
{ |
|
"epoch": 150.42, |
|
"learning_rate": 0.00015777777777777776, |
|
"learning_rate_embeddings": 0.00015777777777777776, |
|
"loss": 2.1789, |
|
"step": 179000 |
|
}, |
|
{ |
|
"epoch": 150.84, |
|
"learning_rate": 0.0001566666666666667, |
|
"learning_rate_embeddings": 0.0001566666666666667, |
|
"loss": 2.167, |
|
"step": 179500 |
|
}, |
|
{ |
|
"epoch": 151.26, |
|
"learning_rate": 0.00015555555555555556, |
|
"learning_rate_embeddings": 0.00015555555555555556, |
|
"loss": 2.1787, |
|
"step": 180000 |
|
}, |
|
{ |
|
"epoch": 151.68, |
|
"learning_rate": 0.00015444444444444444, |
|
"learning_rate_embeddings": 0.00015444444444444444, |
|
"loss": 2.1812, |
|
"step": 180500 |
|
}, |
|
{ |
|
"epoch": 152.1, |
|
"learning_rate": 0.00015333333333333334, |
|
"learning_rate_embeddings": 0.00015333333333333334, |
|
"loss": 2.1847, |
|
"step": 181000 |
|
}, |
|
{ |
|
"epoch": 152.52, |
|
"learning_rate": 0.00015222222222222224, |
|
"learning_rate_embeddings": 0.00015222222222222224, |
|
"loss": 2.168, |
|
"step": 181500 |
|
}, |
|
{ |
|
"epoch": 152.94, |
|
"learning_rate": 0.0001511111111111111, |
|
"learning_rate_embeddings": 0.0001511111111111111, |
|
"loss": 2.1833, |
|
"step": 182000 |
|
}, |
|
{ |
|
"epoch": 153.36, |
|
"learning_rate": 0.00015, |
|
"learning_rate_embeddings": 0.00015, |
|
"loss": 2.1714, |
|
"step": 182500 |
|
}, |
|
{ |
|
"epoch": 153.78, |
|
"learning_rate": 0.0001488888888888889, |
|
"learning_rate_embeddings": 0.0001488888888888889, |
|
"loss": 2.1763, |
|
"step": 183000 |
|
}, |
|
{ |
|
"epoch": 154.2, |
|
"learning_rate": 0.0001477777777777778, |
|
"learning_rate_embeddings": 0.0001477777777777778, |
|
"loss": 2.1729, |
|
"step": 183500 |
|
}, |
|
{ |
|
"epoch": 154.62, |
|
"learning_rate": 0.00014666666666666666, |
|
"learning_rate_embeddings": 0.00014666666666666666, |
|
"loss": 2.1761, |
|
"step": 184000 |
|
}, |
|
{ |
|
"epoch": 155.04, |
|
"learning_rate": 0.00014555555555555556, |
|
"learning_rate_embeddings": 0.00014555555555555556, |
|
"loss": 2.1786, |
|
"step": 184500 |
|
}, |
|
{ |
|
"epoch": 155.46, |
|
"learning_rate": 0.00014444444444444444, |
|
"learning_rate_embeddings": 0.00014444444444444444, |
|
"loss": 2.1629, |
|
"step": 185000 |
|
}, |
|
{ |
|
"epoch": 155.88, |
|
"learning_rate": 0.00014333333333333334, |
|
"learning_rate_embeddings": 0.00014333333333333334, |
|
"loss": 2.1791, |
|
"step": 185500 |
|
}, |
|
{ |
|
"epoch": 156.3, |
|
"learning_rate": 0.0001422222222222222, |
|
"learning_rate_embeddings": 0.0001422222222222222, |
|
"loss": 2.1769, |
|
"step": 186000 |
|
}, |
|
{ |
|
"epoch": 156.72, |
|
"learning_rate": 0.00014111111111111111, |
|
"learning_rate_embeddings": 0.00014111111111111111, |
|
"loss": 2.1755, |
|
"step": 186500 |
|
}, |
|
{ |
|
"epoch": 157.14, |
|
"learning_rate": 0.00014000000000000001, |
|
"learning_rate_embeddings": 0.00014000000000000001, |
|
"loss": 2.1771, |
|
"step": 187000 |
|
}, |
|
{ |
|
"epoch": 157.56, |
|
"learning_rate": 0.0001388888888888889, |
|
"learning_rate_embeddings": 0.0001388888888888889, |
|
"loss": 2.1667, |
|
"step": 187500 |
|
}, |
|
{ |
|
"epoch": 157.56, |
|
"eval_loss": 2.3346025943756104, |
|
"eval_runtime": 118.1677, |
|
"eval_samples_per_second": 572.982, |
|
"eval_steps_per_second": 4.477, |
|
"step": 187500 |
|
}, |
|
{ |
|
"epoch": 157.98, |
|
"learning_rate": 0.0001377777777777778, |
|
"learning_rate_embeddings": 0.0001377777777777778, |
|
"loss": 2.1747, |
|
"step": 188000 |
|
}, |
|
{ |
|
"epoch": 158.4, |
|
"learning_rate": 0.00013666666666666666, |
|
"learning_rate_embeddings": 0.00013666666666666666, |
|
"loss": 2.1649, |
|
"step": 188500 |
|
}, |
|
{ |
|
"epoch": 158.82, |
|
"learning_rate": 0.00013555555555555556, |
|
"learning_rate_embeddings": 0.00013555555555555556, |
|
"loss": 2.1755, |
|
"step": 189000 |
|
}, |
|
{ |
|
"epoch": 159.24, |
|
"learning_rate": 0.00013444444444444447, |
|
"learning_rate_embeddings": 0.00013444444444444447, |
|
"loss": 2.1728, |
|
"step": 189500 |
|
}, |
|
{ |
|
"epoch": 159.66, |
|
"learning_rate": 0.00013333333333333334, |
|
"learning_rate_embeddings": 0.00013333333333333334, |
|
"loss": 2.1705, |
|
"step": 190000 |
|
}, |
|
{ |
|
"epoch": 160.08, |
|
"learning_rate": 0.00013222222222222221, |
|
"learning_rate_embeddings": 0.00013222222222222221, |
|
"loss": 2.1725, |
|
"step": 190500 |
|
}, |
|
{ |
|
"epoch": 160.5, |
|
"learning_rate": 0.00013111111111111111, |
|
"learning_rate_embeddings": 0.00013111111111111111, |
|
"loss": 2.1645, |
|
"step": 191000 |
|
}, |
|
{ |
|
"epoch": 160.92, |
|
"learning_rate": 0.00013000000000000002, |
|
"learning_rate_embeddings": 0.00013000000000000002, |
|
"loss": 2.1827, |
|
"step": 191500 |
|
}, |
|
{ |
|
"epoch": 161.34, |
|
"learning_rate": 0.0001288888888888889, |
|
"learning_rate_embeddings": 0.0001288888888888889, |
|
"loss": 2.1649, |
|
"step": 192000 |
|
}, |
|
{ |
|
"epoch": 161.76, |
|
"learning_rate": 0.00012777777777777776, |
|
"learning_rate_embeddings": 0.00012777777777777776, |
|
"loss": 2.1729, |
|
"step": 192500 |
|
}, |
|
{ |
|
"epoch": 162.18, |
|
"learning_rate": 0.0001266666666666667, |
|
"learning_rate_embeddings": 0.0001266666666666667, |
|
"loss": 2.1746, |
|
"step": 193000 |
|
}, |
|
{ |
|
"epoch": 162.6, |
|
"learning_rate": 0.00012555555555555557, |
|
"learning_rate_embeddings": 0.00012555555555555557, |
|
"loss": 2.1673, |
|
"step": 193500 |
|
}, |
|
{ |
|
"epoch": 163.03, |
|
"learning_rate": 0.00012444444444444444, |
|
"learning_rate_embeddings": 0.00012444444444444444, |
|
"loss": 2.1736, |
|
"step": 194000 |
|
}, |
|
{ |
|
"epoch": 163.45, |
|
"learning_rate": 0.00012333333333333334, |
|
"learning_rate_embeddings": 0.00012333333333333334, |
|
"loss": 2.1577, |
|
"step": 194500 |
|
}, |
|
{ |
|
"epoch": 163.87, |
|
"learning_rate": 0.00012222222222222221, |
|
"learning_rate_embeddings": 0.00012222222222222221, |
|
"loss": 2.1663, |
|
"step": 195000 |
|
}, |
|
{ |
|
"epoch": 164.29, |
|
"learning_rate": 0.00012111111111111112, |
|
"learning_rate_embeddings": 0.00012111111111111112, |
|
"loss": 2.1659, |
|
"step": 195500 |
|
}, |
|
{ |
|
"epoch": 164.71, |
|
"learning_rate": 0.00012, |
|
"learning_rate_embeddings": 0.00012, |
|
"loss": 2.172, |
|
"step": 196000 |
|
}, |
|
{ |
|
"epoch": 165.13, |
|
"learning_rate": 0.00011888888888888889, |
|
"learning_rate_embeddings": 0.00011888888888888889, |
|
"loss": 2.1727, |
|
"step": 196500 |
|
}, |
|
{ |
|
"epoch": 165.55, |
|
"learning_rate": 0.00011777777777777778, |
|
"learning_rate_embeddings": 0.00011777777777777778, |
|
"loss": 2.1555, |
|
"step": 197000 |
|
}, |
|
{ |
|
"epoch": 165.97, |
|
"learning_rate": 0.00011666666666666667, |
|
"learning_rate_embeddings": 0.00011666666666666667, |
|
"loss": 2.1692, |
|
"step": 197500 |
|
}, |
|
{ |
|
"epoch": 166.39, |
|
"learning_rate": 0.00011555555555555555, |
|
"learning_rate_embeddings": 0.00011555555555555555, |
|
"loss": 2.1691, |
|
"step": 198000 |
|
}, |
|
{ |
|
"epoch": 166.81, |
|
"learning_rate": 0.00011444444444444445, |
|
"learning_rate_embeddings": 0.00011444444444444445, |
|
"loss": 2.1659, |
|
"step": 198500 |
|
}, |
|
{ |
|
"epoch": 167.23, |
|
"learning_rate": 0.00011333333333333333, |
|
"learning_rate_embeddings": 0.00011333333333333333, |
|
"loss": 2.1584, |
|
"step": 199000 |
|
}, |
|
{ |
|
"epoch": 167.65, |
|
"learning_rate": 0.00011222222222222223, |
|
"learning_rate_embeddings": 0.00011222222222222223, |
|
"loss": 2.1605, |
|
"step": 199500 |
|
}, |
|
{ |
|
"epoch": 168.07, |
|
"learning_rate": 0.0001111111111111111, |
|
"learning_rate_embeddings": 0.0001111111111111111, |
|
"loss": 2.1679, |
|
"step": 200000 |
|
}, |
|
{ |
|
"epoch": 168.07, |
|
"eval_loss": 2.330087661743164, |
|
"eval_runtime": 117.8736, |
|
"eval_samples_per_second": 574.412, |
|
"eval_steps_per_second": 4.488, |
|
"step": 200000 |
|
}, |
|
{ |
|
"epoch": 168.49, |
|
"learning_rate": 0.00011, |
|
"learning_rate_embeddings": 0.00011, |
|
"loss": 2.1569, |
|
"step": 200500 |
|
}, |
|
{ |
|
"epoch": 168.91, |
|
"learning_rate": 0.00010888888888888888, |
|
"learning_rate_embeddings": 0.00010888888888888888, |
|
"loss": 2.1558, |
|
"step": 201000 |
|
}, |
|
{ |
|
"epoch": 169.33, |
|
"learning_rate": 0.00010777777777777778, |
|
"learning_rate_embeddings": 0.00010777777777777778, |
|
"loss": 2.1498, |
|
"step": 201500 |
|
}, |
|
{ |
|
"epoch": 169.75, |
|
"learning_rate": 0.00010666666666666668, |
|
"learning_rate_embeddings": 0.00010666666666666668, |
|
"loss": 2.1562, |
|
"step": 202000 |
|
}, |
|
{ |
|
"epoch": 170.17, |
|
"learning_rate": 0.00010555555555555555, |
|
"learning_rate_embeddings": 0.00010555555555555555, |
|
"loss": 2.1641, |
|
"step": 202500 |
|
}, |
|
{ |
|
"epoch": 170.59, |
|
"learning_rate": 0.00010444444444444445, |
|
"learning_rate_embeddings": 0.00010444444444444445, |
|
"loss": 2.1579, |
|
"step": 203000 |
|
}, |
|
{ |
|
"epoch": 171.01, |
|
"learning_rate": 0.00010333333333333333, |
|
"learning_rate_embeddings": 0.00010333333333333333, |
|
"loss": 2.1755, |
|
"step": 203500 |
|
}, |
|
{ |
|
"epoch": 171.43, |
|
"learning_rate": 0.00010222222222222223, |
|
"learning_rate_embeddings": 0.00010222222222222223, |
|
"loss": 2.1546, |
|
"step": 204000 |
|
}, |
|
{ |
|
"epoch": 171.85, |
|
"learning_rate": 0.00010111111111111112, |
|
"learning_rate_embeddings": 0.00010111111111111112, |
|
"loss": 2.1662, |
|
"step": 204500 |
|
}, |
|
{ |
|
"epoch": 172.27, |
|
"learning_rate": 0.0001, |
|
"learning_rate_embeddings": 0.0001, |
|
"loss": 2.1624, |
|
"step": 205000 |
|
}, |
|
{ |
|
"epoch": 172.69, |
|
"learning_rate": 9.888888888888889e-05, |
|
"learning_rate_embeddings": 9.888888888888889e-05, |
|
"loss": 2.1639, |
|
"step": 205500 |
|
}, |
|
{ |
|
"epoch": 173.11, |
|
"learning_rate": 9.777777777777778e-05, |
|
"learning_rate_embeddings": 9.777777777777778e-05, |
|
"loss": 2.1687, |
|
"step": 206000 |
|
}, |
|
{ |
|
"epoch": 173.53, |
|
"learning_rate": 9.666666666666667e-05, |
|
"learning_rate_embeddings": 9.666666666666667e-05, |
|
"loss": 2.165, |
|
"step": 206500 |
|
}, |
|
{ |
|
"epoch": 173.95, |
|
"learning_rate": 9.555555555555557e-05, |
|
"learning_rate_embeddings": 9.555555555555557e-05, |
|
"loss": 2.1599, |
|
"step": 207000 |
|
}, |
|
{ |
|
"epoch": 174.37, |
|
"learning_rate": 9.444444444444444e-05, |
|
"learning_rate_embeddings": 9.444444444444444e-05, |
|
"loss": 2.1465, |
|
"step": 207500 |
|
}, |
|
{ |
|
"epoch": 174.79, |
|
"learning_rate": 9.333333333333334e-05, |
|
"learning_rate_embeddings": 9.333333333333334e-05, |
|
"loss": 2.1523, |
|
"step": 208000 |
|
}, |
|
{ |
|
"epoch": 175.21, |
|
"learning_rate": 9.222222222222222e-05, |
|
"learning_rate_embeddings": 9.222222222222222e-05, |
|
"loss": 2.1502, |
|
"step": 208500 |
|
}, |
|
{ |
|
"epoch": 175.63, |
|
"learning_rate": 9.111111111111112e-05, |
|
"learning_rate_embeddings": 9.111111111111112e-05, |
|
"loss": 2.153, |
|
"step": 209000 |
|
}, |
|
{ |
|
"epoch": 176.05, |
|
"learning_rate": 8.999999999999999e-05, |
|
"learning_rate_embeddings": 8.999999999999999e-05, |
|
"loss": 2.1602, |
|
"step": 209500 |
|
}, |
|
{ |
|
"epoch": 176.47, |
|
"learning_rate": 8.888888888888889e-05, |
|
"learning_rate_embeddings": 8.888888888888889e-05, |
|
"loss": 2.1502, |
|
"step": 210000 |
|
}, |
|
{ |
|
"epoch": 176.89, |
|
"learning_rate": 8.777777777777778e-05, |
|
"learning_rate_embeddings": 8.777777777777778e-05, |
|
"loss": 2.1581, |
|
"step": 210500 |
|
}, |
|
{ |
|
"epoch": 177.31, |
|
"learning_rate": 8.666666666666667e-05, |
|
"learning_rate_embeddings": 8.666666666666667e-05, |
|
"loss": 2.1499, |
|
"step": 211000 |
|
}, |
|
{ |
|
"epoch": 177.73, |
|
"learning_rate": 8.555555555555556e-05, |
|
"learning_rate_embeddings": 8.555555555555556e-05, |
|
"loss": 2.145, |
|
"step": 211500 |
|
}, |
|
{ |
|
"epoch": 178.15, |
|
"learning_rate": 8.444444444444444e-05, |
|
"learning_rate_embeddings": 8.444444444444444e-05, |
|
"loss": 2.1601, |
|
"step": 212000 |
|
}, |
|
{ |
|
"epoch": 178.57, |
|
"learning_rate": 8.333333333333333e-05, |
|
"learning_rate_embeddings": 8.333333333333333e-05, |
|
"loss": 2.1498, |
|
"step": 212500 |
|
}, |
|
{ |
|
"epoch": 178.57, |
|
"eval_loss": 2.323934555053711, |
|
"eval_runtime": 118.086, |
|
"eval_samples_per_second": 573.379, |
|
"eval_steps_per_second": 4.48, |
|
"step": 212500 |
|
}, |
|
{ |
|
"epoch": 178.99, |
|
"learning_rate": 8.222222222222223e-05, |
|
"learning_rate_embeddings": 8.222222222222223e-05, |
|
"loss": 2.1592, |
|
"step": 213000 |
|
}, |
|
{ |
|
"epoch": 179.41, |
|
"learning_rate": 8.11111111111111e-05, |
|
"learning_rate_embeddings": 8.11111111111111e-05, |
|
"loss": 2.1557, |
|
"step": 213500 |
|
}, |
|
{ |
|
"epoch": 179.83, |
|
"learning_rate": 8e-05, |
|
"learning_rate_embeddings": 8e-05, |
|
"loss": 2.154, |
|
"step": 214000 |
|
}, |
|
{ |
|
"epoch": 180.25, |
|
"learning_rate": 7.888888888888888e-05, |
|
"learning_rate_embeddings": 7.888888888888888e-05, |
|
"loss": 2.1584, |
|
"step": 214500 |
|
}, |
|
{ |
|
"epoch": 180.67, |
|
"learning_rate": 7.777777777777778e-05, |
|
"learning_rate_embeddings": 7.777777777777778e-05, |
|
"loss": 2.1604, |
|
"step": 215000 |
|
}, |
|
{ |
|
"epoch": 181.09, |
|
"learning_rate": 7.666666666666667e-05, |
|
"learning_rate_embeddings": 7.666666666666667e-05, |
|
"loss": 2.1608, |
|
"step": 215500 |
|
}, |
|
{ |
|
"epoch": 181.51, |
|
"learning_rate": 7.555555555555556e-05, |
|
"learning_rate_embeddings": 7.555555555555556e-05, |
|
"loss": 2.1534, |
|
"step": 216000 |
|
}, |
|
{ |
|
"epoch": 181.93, |
|
"learning_rate": 7.444444444444444e-05, |
|
"learning_rate_embeddings": 7.444444444444444e-05, |
|
"loss": 2.1523, |
|
"step": 216500 |
|
}, |
|
{ |
|
"epoch": 182.35, |
|
"learning_rate": 7.333333333333333e-05, |
|
"learning_rate_embeddings": 7.333333333333333e-05, |
|
"loss": 2.1446, |
|
"step": 217000 |
|
}, |
|
{ |
|
"epoch": 182.77, |
|
"learning_rate": 7.222222222222222e-05, |
|
"learning_rate_embeddings": 7.222222222222222e-05, |
|
"loss": 2.1628, |
|
"step": 217500 |
|
}, |
|
{ |
|
"epoch": 183.19, |
|
"learning_rate": 7.11111111111111e-05, |
|
"learning_rate_embeddings": 7.11111111111111e-05, |
|
"loss": 2.1528, |
|
"step": 218000 |
|
}, |
|
{ |
|
"epoch": 183.61, |
|
"learning_rate": 7.000000000000001e-05, |
|
"learning_rate_embeddings": 7.000000000000001e-05, |
|
"loss": 2.15, |
|
"step": 218500 |
|
}, |
|
{ |
|
"epoch": 184.03, |
|
"learning_rate": 6.88888888888889e-05, |
|
"learning_rate_embeddings": 6.88888888888889e-05, |
|
"loss": 2.1555, |
|
"step": 219000 |
|
}, |
|
{ |
|
"epoch": 184.45, |
|
"learning_rate": 6.777777777777778e-05, |
|
"learning_rate_embeddings": 6.777777777777778e-05, |
|
"loss": 2.1445, |
|
"step": 219500 |
|
}, |
|
{ |
|
"epoch": 184.87, |
|
"learning_rate": 6.666666666666667e-05, |
|
"learning_rate_embeddings": 6.666666666666667e-05, |
|
"loss": 2.1531, |
|
"step": 220000 |
|
}, |
|
{ |
|
"epoch": 185.29, |
|
"learning_rate": 6.555555555555556e-05, |
|
"learning_rate_embeddings": 6.555555555555556e-05, |
|
"loss": 2.1524, |
|
"step": 220500 |
|
}, |
|
{ |
|
"epoch": 185.71, |
|
"learning_rate": 6.444444444444444e-05, |
|
"learning_rate_embeddings": 6.444444444444444e-05, |
|
"loss": 2.1533, |
|
"step": 221000 |
|
}, |
|
{ |
|
"epoch": 186.13, |
|
"learning_rate": 6.333333333333335e-05, |
|
"learning_rate_embeddings": 6.333333333333335e-05, |
|
"loss": 2.1517, |
|
"step": 221500 |
|
}, |
|
{ |
|
"epoch": 186.55, |
|
"learning_rate": 6.222222222222222e-05, |
|
"learning_rate_embeddings": 6.222222222222222e-05, |
|
"loss": 2.147, |
|
"step": 222000 |
|
}, |
|
{ |
|
"epoch": 186.97, |
|
"learning_rate": 6.111111111111111e-05, |
|
"learning_rate_embeddings": 6.111111111111111e-05, |
|
"loss": 2.1503, |
|
"step": 222500 |
|
}, |
|
{ |
|
"epoch": 187.39, |
|
"learning_rate": 6e-05, |
|
"learning_rate_embeddings": 6e-05, |
|
"loss": 2.1482, |
|
"step": 223000 |
|
}, |
|
{ |
|
"epoch": 187.81, |
|
"learning_rate": 5.888888888888889e-05, |
|
"learning_rate_embeddings": 5.888888888888889e-05, |
|
"loss": 2.1409, |
|
"step": 223500 |
|
}, |
|
{ |
|
"epoch": 188.24, |
|
"learning_rate": 5.7777777777777776e-05, |
|
"learning_rate_embeddings": 5.7777777777777776e-05, |
|
"loss": 2.1478, |
|
"step": 224000 |
|
}, |
|
{ |
|
"epoch": 188.66, |
|
"learning_rate": 5.6666666666666664e-05, |
|
"learning_rate_embeddings": 5.6666666666666664e-05, |
|
"loss": 2.1479, |
|
"step": 224500 |
|
}, |
|
{ |
|
"epoch": 189.08, |
|
"learning_rate": 5.555555555555555e-05, |
|
"learning_rate_embeddings": 5.555555555555555e-05, |
|
"loss": 2.1633, |
|
"step": 225000 |
|
}, |
|
{ |
|
"epoch": 189.08, |
|
"eval_loss": 2.321134567260742, |
|
"eval_runtime": 117.9835, |
|
"eval_samples_per_second": 573.877, |
|
"eval_steps_per_second": 4.484, |
|
"step": 225000 |
|
}, |
|
{ |
|
"epoch": 189.5, |
|
"learning_rate": 5.444444444444444e-05, |
|
"learning_rate_embeddings": 5.444444444444444e-05, |
|
"loss": 2.1425, |
|
"step": 225500 |
|
}, |
|
{ |
|
"epoch": 189.92, |
|
"learning_rate": 5.333333333333334e-05, |
|
"learning_rate_embeddings": 5.333333333333334e-05, |
|
"loss": 2.1498, |
|
"step": 226000 |
|
}, |
|
{ |
|
"epoch": 190.34, |
|
"learning_rate": 5.222222222222223e-05, |
|
"learning_rate_embeddings": 5.222222222222223e-05, |
|
"loss": 2.1415, |
|
"step": 226500 |
|
}, |
|
{ |
|
"epoch": 190.76, |
|
"learning_rate": 5.1111111111111115e-05, |
|
"learning_rate_embeddings": 5.1111111111111115e-05, |
|
"loss": 2.145, |
|
"step": 227000 |
|
}, |
|
{ |
|
"epoch": 191.18, |
|
"learning_rate": 5e-05, |
|
"learning_rate_embeddings": 5e-05, |
|
"loss": 2.1516, |
|
"step": 227500 |
|
}, |
|
{ |
|
"epoch": 191.6, |
|
"learning_rate": 4.888888888888889e-05, |
|
"learning_rate_embeddings": 4.888888888888889e-05, |
|
"loss": 2.1514, |
|
"step": 228000 |
|
}, |
|
{ |
|
"epoch": 192.02, |
|
"learning_rate": 4.7777777777777784e-05, |
|
"learning_rate_embeddings": 4.7777777777777784e-05, |
|
"loss": 2.14, |
|
"step": 228500 |
|
}, |
|
{ |
|
"epoch": 192.44, |
|
"learning_rate": 4.666666666666667e-05, |
|
"learning_rate_embeddings": 4.666666666666667e-05, |
|
"loss": 2.1454, |
|
"step": 229000 |
|
}, |
|
{ |
|
"epoch": 192.86, |
|
"learning_rate": 4.555555555555556e-05, |
|
"learning_rate_embeddings": 4.555555555555556e-05, |
|
"loss": 2.1453, |
|
"step": 229500 |
|
}, |
|
{ |
|
"epoch": 193.28, |
|
"learning_rate": 4.4444444444444447e-05, |
|
"learning_rate_embeddings": 4.4444444444444447e-05, |
|
"loss": 2.151, |
|
"step": 230000 |
|
}, |
|
{ |
|
"epoch": 193.7, |
|
"learning_rate": 4.3333333333333334e-05, |
|
"learning_rate_embeddings": 4.3333333333333334e-05, |
|
"loss": 2.144, |
|
"step": 230500 |
|
}, |
|
{ |
|
"epoch": 194.12, |
|
"learning_rate": 4.222222222222222e-05, |
|
"learning_rate_embeddings": 4.222222222222222e-05, |
|
"loss": 2.1479, |
|
"step": 231000 |
|
}, |
|
{ |
|
"epoch": 194.54, |
|
"learning_rate": 4.1111111111111116e-05, |
|
"learning_rate_embeddings": 4.1111111111111116e-05, |
|
"loss": 2.1408, |
|
"step": 231500 |
|
}, |
|
{ |
|
"epoch": 194.96, |
|
"learning_rate": 4e-05, |
|
"learning_rate_embeddings": 4e-05, |
|
"loss": 2.1473, |
|
"step": 232000 |
|
}, |
|
{ |
|
"epoch": 195.38, |
|
"learning_rate": 3.888888888888889e-05, |
|
"learning_rate_embeddings": 3.888888888888889e-05, |
|
"loss": 2.1388, |
|
"step": 232500 |
|
}, |
|
{ |
|
"epoch": 195.8, |
|
"learning_rate": 3.777777777777778e-05, |
|
"learning_rate_embeddings": 3.777777777777778e-05, |
|
"loss": 2.1355, |
|
"step": 233000 |
|
}, |
|
{ |
|
"epoch": 196.22, |
|
"learning_rate": 3.6666666666666666e-05, |
|
"learning_rate_embeddings": 3.6666666666666666e-05, |
|
"loss": 2.1448, |
|
"step": 233500 |
|
}, |
|
{ |
|
"epoch": 196.64, |
|
"learning_rate": 3.555555555555555e-05, |
|
"learning_rate_embeddings": 3.555555555555555e-05, |
|
"loss": 2.136, |
|
"step": 234000 |
|
}, |
|
{ |
|
"epoch": 197.06, |
|
"learning_rate": 3.444444444444445e-05, |
|
"learning_rate_embeddings": 3.444444444444445e-05, |
|
"loss": 2.1362, |
|
"step": 234500 |
|
}, |
|
{ |
|
"epoch": 197.48, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"learning_rate_embeddings": 3.3333333333333335e-05, |
|
"loss": 2.1379, |
|
"step": 235000 |
|
}, |
|
{ |
|
"epoch": 197.9, |
|
"learning_rate": 3.222222222222222e-05, |
|
"learning_rate_embeddings": 3.222222222222222e-05, |
|
"loss": 2.1405, |
|
"step": 235500 |
|
}, |
|
{ |
|
"epoch": 198.32, |
|
"learning_rate": 3.111111111111111e-05, |
|
"learning_rate_embeddings": 3.111111111111111e-05, |
|
"loss": 2.1439, |
|
"step": 236000 |
|
}, |
|
{ |
|
"epoch": 198.74, |
|
"learning_rate": 3e-05, |
|
"learning_rate_embeddings": 3e-05, |
|
"loss": 2.1311, |
|
"step": 236500 |
|
}, |
|
{ |
|
"epoch": 199.16, |
|
"learning_rate": 2.8888888888888888e-05, |
|
"learning_rate_embeddings": 2.8888888888888888e-05, |
|
"loss": 2.1411, |
|
"step": 237000 |
|
}, |
|
{ |
|
"epoch": 199.58, |
|
"learning_rate": 2.7777777777777776e-05, |
|
"learning_rate_embeddings": 2.7777777777777776e-05, |
|
"loss": 2.1369, |
|
"step": 237500 |
|
}, |
|
{ |
|
"epoch": 199.58, |
|
"eval_loss": 2.3181934356689453, |
|
"eval_runtime": 117.8788, |
|
"eval_samples_per_second": 574.387, |
|
"eval_steps_per_second": 4.488, |
|
"step": 237500 |
|
}, |
|
{ |
|
"epoch": 200.0, |
|
"learning_rate": 2.666666666666667e-05, |
|
"learning_rate_embeddings": 2.666666666666667e-05, |
|
"loss": 2.143, |
|
"step": 238000 |
|
}, |
|
{ |
|
"epoch": 200.42, |
|
"learning_rate": 2.5555555555555557e-05, |
|
"learning_rate_embeddings": 2.5555555555555557e-05, |
|
"loss": 2.134, |
|
"step": 238500 |
|
}, |
|
{ |
|
"epoch": 200.84, |
|
"learning_rate": 2.4444444444444445e-05, |
|
"learning_rate_embeddings": 2.4444444444444445e-05, |
|
"loss": 2.1366, |
|
"step": 239000 |
|
}, |
|
{ |
|
"epoch": 201.26, |
|
"learning_rate": 2.3333333333333336e-05, |
|
"learning_rate_embeddings": 2.3333333333333336e-05, |
|
"loss": 2.1355, |
|
"step": 239500 |
|
}, |
|
{ |
|
"epoch": 201.68, |
|
"learning_rate": 2.2222222222222223e-05, |
|
"learning_rate_embeddings": 2.2222222222222223e-05, |
|
"loss": 2.1378, |
|
"step": 240000 |
|
}, |
|
{ |
|
"epoch": 202.1, |
|
"learning_rate": 2.111111111111111e-05, |
|
"learning_rate_embeddings": 2.111111111111111e-05, |
|
"loss": 2.1364, |
|
"step": 240500 |
|
}, |
|
{ |
|
"epoch": 202.52, |
|
"learning_rate": 2e-05, |
|
"learning_rate_embeddings": 2e-05, |
|
"loss": 2.1378, |
|
"step": 241000 |
|
}, |
|
{ |
|
"epoch": 202.94, |
|
"learning_rate": 1.888888888888889e-05, |
|
"learning_rate_embeddings": 1.888888888888889e-05, |
|
"loss": 2.1364, |
|
"step": 241500 |
|
}, |
|
{ |
|
"epoch": 203.36, |
|
"learning_rate": 1.7777777777777777e-05, |
|
"learning_rate_embeddings": 1.7777777777777777e-05, |
|
"loss": 2.1386, |
|
"step": 242000 |
|
}, |
|
{ |
|
"epoch": 203.78, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"learning_rate_embeddings": 1.6666666666666667e-05, |
|
"loss": 2.1351, |
|
"step": 242500 |
|
}, |
|
{ |
|
"epoch": 204.2, |
|
"learning_rate": 1.5555555555555555e-05, |
|
"learning_rate_embeddings": 1.5555555555555555e-05, |
|
"loss": 2.1299, |
|
"step": 243000 |
|
}, |
|
{ |
|
"epoch": 204.62, |
|
"learning_rate": 1.4444444444444444e-05, |
|
"learning_rate_embeddings": 1.4444444444444444e-05, |
|
"loss": 2.1334, |
|
"step": 243500 |
|
}, |
|
{ |
|
"epoch": 205.04, |
|
"learning_rate": 1.3333333333333335e-05, |
|
"learning_rate_embeddings": 1.3333333333333335e-05, |
|
"loss": 2.1388, |
|
"step": 244000 |
|
}, |
|
{ |
|
"epoch": 205.46, |
|
"learning_rate": 1.2222222222222222e-05, |
|
"learning_rate_embeddings": 1.2222222222222222e-05, |
|
"loss": 2.126, |
|
"step": 244500 |
|
}, |
|
{ |
|
"epoch": 205.88, |
|
"learning_rate": 1.1111111111111112e-05, |
|
"learning_rate_embeddings": 1.1111111111111112e-05, |
|
"loss": 2.1353, |
|
"step": 245000 |
|
}, |
|
{ |
|
"epoch": 206.3, |
|
"learning_rate": 1e-05, |
|
"learning_rate_embeddings": 1e-05, |
|
"loss": 2.1333, |
|
"step": 245500 |
|
}, |
|
{ |
|
"epoch": 206.72, |
|
"learning_rate": 8.888888888888888e-06, |
|
"learning_rate_embeddings": 8.888888888888888e-06, |
|
"loss": 2.1291, |
|
"step": 246000 |
|
}, |
|
{ |
|
"epoch": 207.14, |
|
"learning_rate": 7.777777777777777e-06, |
|
"learning_rate_embeddings": 7.777777777777777e-06, |
|
"loss": 2.1352, |
|
"step": 246500 |
|
}, |
|
{ |
|
"epoch": 207.56, |
|
"learning_rate": 6.6666666666666675e-06, |
|
"learning_rate_embeddings": 6.6666666666666675e-06, |
|
"loss": 2.1403, |
|
"step": 247000 |
|
}, |
|
{ |
|
"epoch": 207.98, |
|
"learning_rate": 5.555555555555556e-06, |
|
"learning_rate_embeddings": 5.555555555555556e-06, |
|
"loss": 2.1278, |
|
"step": 247500 |
|
}, |
|
{ |
|
"epoch": 208.4, |
|
"learning_rate": 4.444444444444444e-06, |
|
"learning_rate_embeddings": 4.444444444444444e-06, |
|
"loss": 2.1254, |
|
"step": 248000 |
|
}, |
|
{ |
|
"epoch": 208.82, |
|
"learning_rate": 3.3333333333333337e-06, |
|
"learning_rate_embeddings": 3.3333333333333337e-06, |
|
"loss": 2.1348, |
|
"step": 248500 |
|
}, |
|
{ |
|
"epoch": 209.24, |
|
"learning_rate": 2.222222222222222e-06, |
|
"learning_rate_embeddings": 2.222222222222222e-06, |
|
"loss": 2.1306, |
|
"step": 249000 |
|
}, |
|
{ |
|
"epoch": 209.66, |
|
"learning_rate": 1.111111111111111e-06, |
|
"learning_rate_embeddings": 1.111111111111111e-06, |
|
"loss": 2.1266, |
|
"step": 249500 |
|
}, |
|
{ |
|
"epoch": 210.08, |
|
"learning_rate": 0.0, |
|
"learning_rate_embeddings": 0.0, |
|
"loss": 2.1316, |
|
"step": 250000 |
|
}, |
|
{ |
|
"epoch": 210.08, |
|
"eval_loss": 2.3166539669036865, |
|
"eval_runtime": 117.9198, |
|
"eval_samples_per_second": 574.187, |
|
"eval_steps_per_second": 4.486, |
|
"step": 250000 |
|
} |
|
], |
|
"max_steps": 250000, |
|
"num_train_epochs": 211, |
|
"total_flos": 4.18242839445504e+18, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |