{
"best_metric": 1.7786831855773926,
"best_model_checkpoint": "../logs/03-08-2024T09-55-11-detailclip-b32/checkpoint-116",
"epoch": 1.5130434782608697,
"eval_steps": 58,
"global_step": 174,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05217391304347826,
"grad_norm": 2.7094666957855225,
"learning_rate": 1.2e-05,
"loss": 0.7099,
"step": 6
},
{
"epoch": 0.10434782608695652,
"grad_norm": 2.6887893676757812,
"learning_rate": 2.4e-05,
"loss": 0.6948,
"step": 12
},
{
"epoch": 0.1565217391304348,
"grad_norm": 2.56785249710083,
"learning_rate": 3.6e-05,
"loss": 0.7276,
"step": 18
},
{
"epoch": 0.20869565217391303,
"grad_norm": 2.413269281387329,
"learning_rate": 4.8e-05,
"loss": 0.5937,
"step": 24
},
{
"epoch": 0.2608695652173913,
"grad_norm": 2.0822973251342773,
"learning_rate": 6e-05,
"loss": 0.4702,
"step": 30
},
{
"epoch": 0.3130434782608696,
"grad_norm": 1.9383469820022583,
"learning_rate": 7.2e-05,
"loss": 0.4579,
"step": 36
},
{
"epoch": 0.3652173913043478,
"grad_norm": 1.6925252676010132,
"learning_rate": 8.4e-05,
"loss": 0.4028,
"step": 42
},
{
"epoch": 0.41739130434782606,
"grad_norm": 1.5693070888519287,
"learning_rate": 9.6e-05,
"loss": 0.3466,
"step": 48
},
{
"epoch": 0.46956521739130436,
"grad_norm": 1.8624712228775024,
"learning_rate": 9.999673735634258e-05,
"loss": 0.31,
"step": 54
},
{
"epoch": 0.5043478260869565,
"eval_loss": 1.88290536403656,
"eval_runtime": 371.0087,
"eval_samples_per_second": 6.021,
"eval_steps_per_second": 0.049,
"step": 58
},
{
"epoch": 0.5043478260869565,
"neg_increments": 0.40031376906933136,
"positive_increments": 0.6000771337979885,
"rate_increments": 0.60599292530573,
"step": 58
},
{
"epoch": 0.5217391304347826,
"grad_norm": 1.565119743347168,
"learning_rate": 9.997960964140947e-05,
"loss": 0.289,
"step": 60
},
{
"epoch": 0.5739130434782609,
"grad_norm": 1.695056438446045,
"learning_rate": 9.994780621691156e-05,
"loss": 0.2486,
"step": 66
},
{
"epoch": 0.6260869565217392,
"grad_norm": 1.3525909185409546,
"learning_rate": 9.990133642141359e-05,
"loss": 0.2476,
"step": 72
},
{
"epoch": 0.6782608695652174,
"grad_norm": 1.7046144008636475,
"learning_rate": 9.984021390002458e-05,
"loss": 0.2677,
"step": 78
},
{
"epoch": 0.7304347826086957,
"grad_norm": 1.199271559715271,
"learning_rate": 9.976445660039118e-05,
"loss": 0.2425,
"step": 84
},
{
"epoch": 0.782608695652174,
"grad_norm": 1.1520024538040161,
"learning_rate": 9.967408676742751e-05,
"loss": 0.2444,
"step": 90
},
{
"epoch": 0.8347826086956521,
"grad_norm": 1.023667335510254,
"learning_rate": 9.956913093678348e-05,
"loss": 0.2127,
"step": 96
},
{
"epoch": 0.8869565217391304,
"grad_norm": 1.418879747390747,
"learning_rate": 9.944961992705288e-05,
"loss": 0.2375,
"step": 102
},
{
"epoch": 0.9391304347826087,
"grad_norm": 1.6150606870651245,
"learning_rate": 9.931558883072403e-05,
"loss": 0.2214,
"step": 108
},
{
"epoch": 0.991304347826087,
"grad_norm": 1.3231348991394043,
"learning_rate": 9.916707700387547e-05,
"loss": 0.2283,
"step": 114
},
{
"epoch": 1.008695652173913,
"eval_loss": 1.7786831855773926,
"eval_runtime": 352.6788,
"eval_samples_per_second": 6.334,
"eval_steps_per_second": 0.051,
"step": 116
},
{
"epoch": 1.008695652173913,
"neg_increments": 0.38078110707552626,
"positive_increments": 0.6381213280123575,
"rate_increments": 0.6405809892973509,
"step": 116
},
{
"epoch": 1.0434782608695652,
"grad_norm": 1.1350363492965698,
"learning_rate": 9.900412805461967e-05,
"loss": 0.2019,
"step": 120
},
{
"epoch": 1.0956521739130434,
"grad_norm": 1.305219054222107,
"learning_rate": 9.882678983029819e-05,
"loss": 0.1942,
"step": 126
},
{
"epoch": 1.1478260869565218,
"grad_norm": 0.8753030300140381,
"learning_rate": 9.863511440343206e-05,
"loss": 0.19,
"step": 132
},
{
"epoch": 1.2,
"grad_norm": 1.1253970861434937,
"learning_rate": 9.842915805643155e-05,
"loss": 0.1917,
"step": 138
},
{
"epoch": 1.2521739130434781,
"grad_norm": 1.4084516763687134,
"learning_rate": 9.820898126506979e-05,
"loss": 0.1898,
"step": 144
},
{
"epoch": 1.3043478260869565,
"grad_norm": 1.483275294303894,
"learning_rate": 9.797464868072488e-05,
"loss": 0.2025,
"step": 150
},
{
"epoch": 1.3565217391304347,
"grad_norm": 1.4686787128448486,
"learning_rate": 9.772622911139622e-05,
"loss": 0.1918,
"step": 156
},
{
"epoch": 1.4086956521739131,
"grad_norm": 1.3151410818099976,
"learning_rate": 9.746379550150009e-05,
"loss": 0.222,
"step": 162
},
{
"epoch": 1.4608695652173913,
"grad_norm": 1.1024744510650635,
"learning_rate": 9.71874249104506e-05,
"loss": 0.179,
"step": 168
},
{
"epoch": 1.5130434782608697,
"grad_norm": 1.1871354579925537,
"learning_rate": 9.68971984900326e-05,
"loss": 0.1768,
"step": 174
},
{
"epoch": 1.5130434782608697,
"eval_loss": 1.8319995403289795,
"eval_runtime": 375.6242,
"eval_samples_per_second": 5.947,
"eval_steps_per_second": 0.048,
"step": 174
},
{
"epoch": 1.5130434782608697,
"neg_increments": 0.36799247711332744,
"positive_increments": 0.6630822809526313,
"rate_increments": 0.6599125175329515,
"step": 174
}
],
"logging_steps": 6,
"max_steps": 1150,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 58,
"total_flos": 3942368157607110.0,
"train_batch_size": 128,
"trial_name": null,
"trial_params": null
}