{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9981185324553152,
"eval_steps": 500,
"global_step": 531,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04,
"grad_norm": 2.453125,
"learning_rate": 2.5e-06,
"loss": 1.6646,
"step": 10
},
{
"epoch": 0.08,
"grad_norm": 2.71875,
"learning_rate": 5e-06,
"loss": 1.633,
"step": 20
},
{
"epoch": 0.11,
"grad_norm": 1.453125,
"learning_rate": 7.500000000000001e-06,
"loss": 1.5999,
"step": 30
},
{
"epoch": 0.15,
"grad_norm": 1.921875,
"learning_rate": 1e-05,
"loss": 1.6187,
"step": 40
},
{
"epoch": 0.19,
"grad_norm": 2.625,
"learning_rate": 1.25e-05,
"loss": 1.5918,
"step": 50
},
{
"epoch": 0.23,
"grad_norm": 0.59765625,
"learning_rate": 1.5000000000000002e-05,
"loss": 1.5936,
"step": 60
},
{
"epoch": 0.26,
"grad_norm": 2.015625,
"learning_rate": 1.7500000000000002e-05,
"loss": 1.5263,
"step": 70
},
{
"epoch": 0.3,
"grad_norm": 0.92578125,
"learning_rate": 2e-05,
"loss": 1.4784,
"step": 80
},
{
"epoch": 0.34,
"grad_norm": 0.474609375,
"learning_rate": 1.999034865600726e-05,
"loss": 1.3839,
"step": 90
},
{
"epoch": 0.38,
"grad_norm": 1.2109375,
"learning_rate": 1.9961413253717214e-05,
"loss": 1.3698,
"step": 100
},
{
"epoch": 0.41,
"grad_norm": 1.0390625,
"learning_rate": 1.9913249646234072e-05,
"loss": 1.2916,
"step": 110
},
{
"epoch": 0.45,
"grad_norm": 0.35546875,
"learning_rate": 1.9845950802266584e-05,
"loss": 1.2607,
"step": 120
},
{
"epoch": 0.49,
"grad_norm": 0.375,
"learning_rate": 1.9759646626673445e-05,
"loss": 1.2405,
"step": 130
},
{
"epoch": 0.53,
"grad_norm": 0.8828125,
"learning_rate": 1.9654503709711984e-05,
"loss": 1.2615,
"step": 140
},
{
"epoch": 0.56,
"grad_norm": 1.2421875,
"learning_rate": 1.9530725005474195e-05,
"loss": 1.1433,
"step": 150
},
{
"epoch": 0.6,
"grad_norm": 0.404296875,
"learning_rate": 1.93885494401308e-05,
"loss": 1.1599,
"step": 160
},
{
"epoch": 0.64,
"grad_norm": 0.91796875,
"learning_rate": 1.9228251450739495e-05,
"loss": 1.1325,
"step": 170
},
{
"epoch": 0.68,
"grad_norm": 0.55859375,
"learning_rate": 1.905014045550767e-05,
"loss": 1.0797,
"step": 180
},
{
"epoch": 0.71,
"grad_norm": 0.375,
"learning_rate": 1.8854560256532098e-05,
"loss": 1.041,
"step": 190
},
{
"epoch": 0.75,
"grad_norm": 1.2265625,
"learning_rate": 1.8641888376168483e-05,
"loss": 1.0286,
"step": 200
},
{
"epoch": 0.79,
"grad_norm": 0.287109375,
"learning_rate": 1.8412535328311813e-05,
"loss": 0.9831,
"step": 210
},
{
"epoch": 0.83,
"grad_norm": 0.28515625,
"learning_rate": 1.816694382599422e-05,
"loss": 0.9927,
"step": 220
},
{
"epoch": 0.87,
"grad_norm": 3.25,
"learning_rate": 1.7905587926829815e-05,
"loss": 1.0075,
"step": 230
},
{
"epoch": 0.9,
"grad_norm": 0.255859375,
"learning_rate": 1.762897211795607e-05,
"loss": 1.0232,
"step": 240
},
{
"epoch": 0.94,
"grad_norm": 0.6484375,
"learning_rate": 1.733763034223804e-05,
"loss": 0.9476,
"step": 250
},
{
"epoch": 0.98,
"grad_norm": 0.67578125,
"learning_rate": 1.7032124967615112e-05,
"loss": 0.9338,
"step": 260
},
{
"epoch": 1.02,
"grad_norm": 0.41796875,
"learning_rate": 1.6713045701579705e-05,
"loss": 0.9243,
"step": 270
},
{
"epoch": 1.05,
"grad_norm": 0.232421875,
"learning_rate": 1.638100845288331e-05,
"loss": 0.9084,
"step": 280
},
{
"epoch": 1.09,
"grad_norm": 0.349609375,
"learning_rate": 1.6036654142667043e-05,
"loss": 0.896,
"step": 290
},
{
"epoch": 1.13,
"grad_norm": 0.25390625,
"learning_rate": 1.568064746731156e-05,
"loss": 0.9311,
"step": 300
},
{
"epoch": 1.17,
"grad_norm": 0.236328125,
"learning_rate": 1.5313675615394373e-05,
"loss": 0.9182,
"step": 310
},
{
"epoch": 1.2,
"grad_norm": 0.326171875,
"learning_rate": 1.4936446941231186e-05,
"loss": 0.8967,
"step": 320
},
{
"epoch": 1.24,
"grad_norm": 0.2451171875,
"learning_rate": 1.4549689597561652e-05,
"loss": 0.8965,
"step": 330
},
{
"epoch": 1.28,
"grad_norm": 0.435546875,
"learning_rate": 1.4154150130018867e-05,
"loss": 0.9105,
"step": 340
},
{
"epoch": 1.32,
"grad_norm": 0.3359375,
"learning_rate": 1.375059203609562e-05,
"loss": 0.9089,
"step": 350
},
{
"epoch": 1.35,
"grad_norm": 0.2578125,
"learning_rate": 1.3339794291389015e-05,
"loss": 0.9066,
"step": 360
},
{
"epoch": 1.39,
"grad_norm": 0.271484375,
"learning_rate": 1.2922549845968174e-05,
"loss": 0.8878,
"step": 370
},
{
"epoch": 1.43,
"grad_norm": 0.30078125,
"learning_rate": 1.2499664093767458e-05,
"loss": 0.8909,
"step": 380
},
{
"epoch": 1.47,
"grad_norm": 2.8125,
"learning_rate": 1.2071953317959692e-05,
"loss": 0.8659,
"step": 390
},
{
"epoch": 1.51,
"grad_norm": 0.3125,
"learning_rate": 1.1640243115310219e-05,
"loss": 0.8685,
"step": 400
},
{
"epoch": 1.54,
"grad_norm": 0.216796875,
"learning_rate": 1.1205366802553231e-05,
"loss": 0.8959,
"step": 410
},
{
"epoch": 1.58,
"grad_norm": 0.310546875,
"learning_rate": 1.076816380786647e-05,
"loss": 0.9412,
"step": 420
},
{
"epoch": 1.62,
"grad_norm": 0.99609375,
"learning_rate": 1.0329478050549208e-05,
"loss": 0.8929,
"step": 430
},
{
"epoch": 1.66,
"grad_norm": 0.3359375,
"learning_rate": 9.890156312031165e-06,
"loss": 0.9102,
"step": 440
},
{
"epoch": 1.69,
"grad_norm": 0.37109375,
"learning_rate": 9.451046601356725e-06,
"loss": 0.8645,
"step": 450
},
{
"epoch": 1.73,
"grad_norm": 0.259765625,
"learning_rate": 9.012996518299547e-06,
"loss": 0.8554,
"step": 460
},
{
"epoch": 1.77,
"grad_norm": 0.314453125,
"learning_rate": 8.576851617267151e-06,
"loss": 0.9034,
"step": 470
},
{
"epoch": 1.81,
"grad_norm": 0.2001953125,
"learning_rate": 8.143453775153646e-06,
"loss": 0.8696,
"step": 480
},
{
"epoch": 1.84,
"grad_norm": 0.671875,
"learning_rate": 7.713639566291028e-06,
"loss": 0.8801,
"step": 490
},
{
"epoch": 1.88,
"grad_norm": 0.291015625,
"learning_rate": 7.2882386476358304e-06,
"loss": 0.8872,
"step": 500
},
{
"epoch": 1.92,
"grad_norm": 0.37109375,
"learning_rate": 6.868072157308213e-06,
"loss": 0.8586,
"step": 510
},
{
"epoch": 1.96,
"grad_norm": 0.7109375,
"learning_rate": 6.453951129574644e-06,
"loss": 0.8727,
"step": 520
},
{
"epoch": 1.99,
"grad_norm": 1.15625,
"learning_rate": 6.046674929333787e-06,
"loss": 0.8833,
"step": 530
}
],
"logging_steps": 10,
"max_steps": 795,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 1.03886006255616e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}