{
  "best_metric": null,
  "best_metric_at_epoch": null,
  "best_model_checkpoint": null,
  "epoch": 14.598540145985401,
  "global_step": 36000,
  "latest_metric": null,
  "log_history": [
    {
      "epoch": 0.2,
      "learning_rate": 4.9324141659908085e-05,
      "loss": 1.7428,
      "step": 500
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.864828331981617e-05,
      "loss": 1.2225,
      "step": 1000
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.797242497972425e-05,
      "loss": 1.0975,
      "step": 1500
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.729656663963233e-05,
      "loss": 1.0237,
      "step": 2000
    },
    {
      "epoch": 1.01,
      "learning_rate": 4.6620708299540416e-05,
      "loss": 0.9755,
      "step": 2500
    },
    {
      "epoch": 1.22,
      "learning_rate": 4.59448499594485e-05,
      "loss": 0.9379,
      "step": 3000
    },
    {
      "epoch": 1.42,
      "learning_rate": 4.526899161935659e-05,
      "loss": 0.9069,
      "step": 3500
    },
    {
      "epoch": 1.62,
      "learning_rate": 4.459313327926467e-05,
      "loss": 0.882,
      "step": 4000
    },
    {
      "epoch": 1.82,
      "learning_rate": 4.3917274939172754e-05,
      "loss": 0.8649,
      "step": 4500
    },
    {
      "epoch": 2.03,
      "learning_rate": 4.3241416599080836e-05,
      "loss": 0.8487,
      "step": 5000
    },
    {
      "epoch": 2.23,
      "learning_rate": 4.256555825898892e-05,
      "loss": 0.8321,
      "step": 5500
    },
    {
      "epoch": 2.43,
      "learning_rate": 4.1889699918897e-05,
      "loss": 0.818,
      "step": 6000
    },
    {
      "epoch": 2.64,
      "learning_rate": 4.1213841578805084e-05,
      "loss": 0.8052,
      "step": 6500
    },
    {
      "epoch": 2.84,
      "learning_rate": 4.053798323871317e-05,
      "loss": 0.7936,
      "step": 7000
    },
    {
      "epoch": 3.04,
      "learning_rate": 3.986212489862125e-05,
      "loss": 0.7868,
      "step": 7500
    },
    {
      "epoch": 3.24,
      "learning_rate": 3.918626655852933e-05,
      "loss": 0.7777,
      "step": 8000
    },
    {
      "epoch": 3.45,
      "learning_rate": 3.8510408218437415e-05,
      "loss": 0.7685,
      "step": 8500
    },
    {
      "epoch": 3.65,
      "learning_rate": 3.78345498783455e-05,
      "loss": 0.7632,
      "step": 9000
    },
    {
      "epoch": 3.85,
      "learning_rate": 3.715869153825358e-05,
      "loss": 0.754,
      "step": 9500
    },
    {
      "epoch": 4.06,
      "learning_rate": 3.648283319816166e-05,
      "loss": 0.7486,
      "step": 10000
    },
    {
      "epoch": 4.26,
      "learning_rate": 3.580697485806975e-05,
      "loss": 0.742,
      "step": 10500
    },
    {
      "epoch": 4.46,
      "learning_rate": 3.5131116517977835e-05,
      "loss": 0.7353,
      "step": 11000
    },
    {
      "epoch": 4.66,
      "learning_rate": 3.445525817788592e-05,
      "loss": 0.7304,
      "step": 11500
    },
    {
      "epoch": 4.87,
      "learning_rate": 3.3779399837794e-05,
      "loss": 0.7271,
      "step": 12000
    },
    {
      "epoch": 5.07,
      "learning_rate": 3.3103541497702084e-05,
      "loss": 0.7213,
      "step": 12500
    },
    {
      "epoch": 5.27,
      "learning_rate": 3.2427683157610166e-05,
      "loss": 0.7156,
      "step": 13000
    },
    {
      "epoch": 5.47,
      "learning_rate": 3.175182481751825e-05,
      "loss": 0.7111,
      "step": 13500
    },
    {
      "epoch": 5.68,
      "learning_rate": 3.107596647742633e-05,
      "loss": 0.7087,
      "step": 14000
    },
    {
      "epoch": 5.88,
      "learning_rate": 3.0400108137334414e-05,
      "loss": 0.7067,
      "step": 14500
    },
    {
      "epoch": 6.08,
      "learning_rate": 2.9724249797242497e-05,
      "loss": 0.7015,
      "step": 15000
    },
    {
      "epoch": 6.29,
      "learning_rate": 2.904839145715058e-05,
      "loss": 0.6951,
      "step": 15500
    },
    {
      "epoch": 6.49,
      "learning_rate": 2.8372533117058663e-05,
      "loss": 0.6922,
      "step": 16000
    },
    {
      "epoch": 6.69,
      "learning_rate": 2.7696674776966745e-05,
      "loss": 0.6917,
      "step": 16500
    },
    {
      "epoch": 6.89,
      "learning_rate": 2.7020816436874835e-05,
      "loss": 0.6869,
      "step": 17000
    },
    {
      "epoch": 7.1,
      "learning_rate": 2.6344958096782917e-05,
      "loss": 0.6834,
      "step": 17500
    },
    {
      "epoch": 7.3,
      "learning_rate": 2.5669099756691e-05,
      "loss": 0.6819,
      "step": 18000
    },
    {
      "epoch": 7.5,
      "learning_rate": 2.499324141659908e-05,
      "loss": 0.6801,
      "step": 18500
    },
    {
      "epoch": 7.7,
      "learning_rate": 2.4317383076507166e-05,
      "loss": 0.6755,
      "step": 19000
    },
    {
      "epoch": 7.91,
      "learning_rate": 2.3641524736415248e-05,
      "loss": 0.6734,
      "step": 19500
    },
    {
      "epoch": 8.11,
      "learning_rate": 2.296566639632333e-05,
      "loss": 0.6706,
      "step": 20000
    },
    {
      "epoch": 8.31,
      "learning_rate": 2.2289808056231414e-05,
      "loss": 0.6688,
      "step": 20500
    },
    {
      "epoch": 8.52,
      "learning_rate": 2.1613949716139496e-05,
      "loss": 0.6662,
      "step": 21000
    },
    {
      "epoch": 8.72,
      "learning_rate": 2.093809137604758e-05,
      "loss": 0.6636,
      "step": 21500
    },
    {
      "epoch": 8.92,
      "learning_rate": 2.0262233035955662e-05,
      "loss": 0.6622,
      "step": 22000
    },
    {
      "epoch": 9.12,
      "learning_rate": 1.9586374695863748e-05,
      "loss": 0.6593,
      "step": 22500
    },
    {
      "epoch": 9.33,
      "learning_rate": 1.891051635577183e-05,
      "loss": 0.6568,
      "step": 23000
    },
    {
      "epoch": 9.53,
      "learning_rate": 1.8234658015679913e-05,
      "loss": 0.6563,
      "step": 23500
    },
    {
      "epoch": 9.73,
      "learning_rate": 1.7558799675587996e-05,
      "loss": 0.6542,
      "step": 24000
    },
    {
      "epoch": 9.94,
      "learning_rate": 1.688294133549608e-05,
      "loss": 0.6535,
      "step": 24500
    },
    {
      "epoch": 10.14,
      "learning_rate": 1.620708299540416e-05,
      "loss": 0.6486,
      "step": 25000
    },
    {
      "epoch": 10.34,
      "learning_rate": 1.5531224655312247e-05,
      "loss": 0.6485,
      "step": 25500
    },
    {
      "epoch": 10.54,
      "learning_rate": 1.4855366315220332e-05,
      "loss": 0.6474,
      "step": 26000
    },
    {
      "epoch": 10.75,
      "learning_rate": 1.4179507975128415e-05,
      "loss": 0.647,
      "step": 26500
    },
    {
      "epoch": 10.95,
      "learning_rate": 1.3503649635036497e-05,
      "loss": 0.6451,
      "step": 27000
    },
    {
      "epoch": 11.15,
      "learning_rate": 1.282779129494458e-05,
      "loss": 0.6426,
      "step": 27500
    },
    {
      "epoch": 11.35,
      "learning_rate": 1.2151932954852663e-05,
      "loss": 0.6431,
      "step": 28000
    },
    {
      "epoch": 11.56,
      "learning_rate": 1.1476074614760747e-05,
      "loss": 0.641,
      "step": 28500
    },
    {
      "epoch": 11.76,
      "learning_rate": 1.080021627466883e-05,
      "loss": 0.6414,
      "step": 29000
    },
    {
      "epoch": 11.96,
      "learning_rate": 1.0124357934576913e-05,
      "loss": 0.6382,
      "step": 29500
    },
    {
      "epoch": 12.17,
      "learning_rate": 9.448499594484997e-06,
      "loss": 0.635,
      "step": 30000
    },
    {
      "epoch": 12.37,
      "learning_rate": 8.77264125439308e-06,
      "loss": 0.6357,
      "step": 30500
    },
    {
      "epoch": 12.57,
      "learning_rate": 8.096782914301162e-06,
      "loss": 0.634,
      "step": 31000
    },
    {
      "epoch": 12.77,
      "learning_rate": 7.420924574209247e-06,
      "loss": 0.6338,
      "step": 31500
    },
    {
      "epoch": 12.98,
      "learning_rate": 6.7450662341173294e-06,
      "loss": 0.633,
      "step": 32000
    },
    {
      "epoch": 13.18,
      "learning_rate": 6.069207894025412e-06,
      "loss": 0.6301,
      "step": 32500
    },
    {
      "epoch": 13.38,
      "learning_rate": 5.393349553933496e-06,
      "loss": 0.6313,
      "step": 33000
    },
    {
      "epoch": 13.58,
      "learning_rate": 4.717491213841578e-06,
      "loss": 0.6294,
      "step": 33500
    },
    {
      "epoch": 13.79,
      "learning_rate": 4.041632873749662e-06,
      "loss": 0.6311,
      "step": 34000
    },
    {
      "epoch": 13.99,
      "learning_rate": 3.365774533657746e-06,
      "loss": 0.631,
      "step": 34500
    },
    {
      "epoch": 14.19,
      "learning_rate": 2.6899161935658286e-06,
      "loss": 0.6274,
      "step": 35000
    },
    {
      "epoch": 14.4,
      "learning_rate": 2.014057853473912e-06,
      "loss": 0.6274,
      "step": 35500
    },
    {
      "epoch": 14.6,
      "learning_rate": 1.3381995133819951e-06,
      "loss": 0.6273,
      "step": 36000
    }
  ],
  "max_steps": 36990,
  "num_train_epochs": 15,
  "start_time": 1624121724.3048394,
  "total_flos": 0
}