mistral-7b-grok / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9986257443884563,
"eval_steps": 500,
"global_step": 545,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 3.6363636363636366e-07,
"loss": 1.7361,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 1.8181818181818183e-06,
"loss": 1.597,
"step": 5
},
{
"epoch": 0.02,
"learning_rate": 3.6363636363636366e-06,
"loss": 1.2948,
"step": 10
},
{
"epoch": 0.03,
"learning_rate": 5.4545454545454545e-06,
"loss": 1.1706,
"step": 15
},
{
"epoch": 0.04,
"learning_rate": 7.272727272727273e-06,
"loss": 1.1151,
"step": 20
},
{
"epoch": 0.05,
"learning_rate": 9.090909090909091e-06,
"loss": 1.0596,
"step": 25
},
{
"epoch": 0.05,
"learning_rate": 1.0909090909090909e-05,
"loss": 1.0556,
"step": 30
},
{
"epoch": 0.06,
"learning_rate": 1.2727272727272728e-05,
"loss": 1.0255,
"step": 35
},
{
"epoch": 0.07,
"learning_rate": 1.4545454545454546e-05,
"loss": 1.0134,
"step": 40
},
{
"epoch": 0.08,
"learning_rate": 1.6363636363636366e-05,
"loss": 1.0155,
"step": 45
},
{
"epoch": 0.09,
"learning_rate": 1.8181818181818182e-05,
"loss": 1.0141,
"step": 50
},
{
"epoch": 0.1,
"learning_rate": 2e-05,
"loss": 1.0085,
"step": 55
},
{
"epoch": 0.11,
"learning_rate": 1.999486216200688e-05,
"loss": 0.9965,
"step": 60
},
{
"epoch": 0.12,
"learning_rate": 1.9979453927503366e-05,
"loss": 0.9977,
"step": 65
},
{
"epoch": 0.13,
"learning_rate": 1.9953791129491985e-05,
"loss": 1.0026,
"step": 70
},
{
"epoch": 0.14,
"learning_rate": 1.991790013823246e-05,
"loss": 0.9994,
"step": 75
},
{
"epoch": 0.15,
"learning_rate": 1.9871817834144506e-05,
"loss": 0.9864,
"step": 80
},
{
"epoch": 0.16,
"learning_rate": 1.9815591569910654e-05,
"loss": 1.0066,
"step": 85
},
{
"epoch": 0.16,
"learning_rate": 1.9749279121818235e-05,
"loss": 0.9878,
"step": 90
},
{
"epoch": 0.17,
"learning_rate": 1.9672948630390296e-05,
"loss": 0.989,
"step": 95
},
{
"epoch": 0.18,
"learning_rate": 1.9586678530366607e-05,
"loss": 0.9942,
"step": 100
},
{
"epoch": 0.19,
"learning_rate": 1.949055747010669e-05,
"loss": 1.0028,
"step": 105
},
{
"epoch": 0.2,
"learning_rate": 1.9384684220497605e-05,
"loss": 0.9865,
"step": 110
},
{
"epoch": 0.21,
"learning_rate": 1.926916757346022e-05,
"loss": 0.9907,
"step": 115
},
{
"epoch": 0.22,
"learning_rate": 1.9144126230158127e-05,
"loss": 0.984,
"step": 120
},
{
"epoch": 0.23,
"learning_rate": 1.900968867902419e-05,
"loss": 0.9891,
"step": 125
},
{
"epoch": 0.24,
"learning_rate": 1.8865993063730003e-05,
"loss": 0.9858,
"step": 130
},
{
"epoch": 0.25,
"learning_rate": 1.8713187041233896e-05,
"loss": 0.9872,
"step": 135
},
{
"epoch": 0.26,
"learning_rate": 1.8551427630053464e-05,
"loss": 0.9844,
"step": 140
},
{
"epoch": 0.27,
"learning_rate": 1.8380881048918406e-05,
"loss": 0.9842,
"step": 145
},
{
"epoch": 0.27,
"learning_rate": 1.820172254596956e-05,
"loss": 0.984,
"step": 150
},
{
"epoch": 0.28,
"learning_rate": 1.8014136218679566e-05,
"loss": 0.9975,
"step": 155
},
{
"epoch": 0.29,
"learning_rate": 1.78183148246803e-05,
"loss": 0.9799,
"step": 160
},
{
"epoch": 0.3,
"learning_rate": 1.7614459583691346e-05,
"loss": 0.9936,
"step": 165
},
{
"epoch": 0.31,
"learning_rate": 1.7402779970753156e-05,
"loss": 0.9711,
"step": 170
},
{
"epoch": 0.32,
"learning_rate": 1.7183493500977277e-05,
"loss": 0.9767,
"step": 175
},
{
"epoch": 0.33,
"learning_rate": 1.6956825506034866e-05,
"loss": 0.9754,
"step": 180
},
{
"epoch": 0.34,
"learning_rate": 1.672300890261317e-05,
"loss": 0.9858,
"step": 185
},
{
"epoch": 0.35,
"learning_rate": 1.6482283953077887e-05,
"loss": 0.9779,
"step": 190
},
{
"epoch": 0.36,
"learning_rate": 1.6234898018587336e-05,
"loss": 0.9868,
"step": 195
},
{
"epoch": 0.37,
"learning_rate": 1.598110530491216e-05,
"loss": 0.9786,
"step": 200
},
{
"epoch": 0.38,
"learning_rate": 1.5721166601221697e-05,
"loss": 0.9665,
"step": 205
},
{
"epoch": 0.38,
"learning_rate": 1.5455349012105488e-05,
"loss": 0.9734,
"step": 210
},
{
"epoch": 0.39,
"learning_rate": 1.5183925683105254e-05,
"loss": 0.961,
"step": 215
},
{
"epoch": 0.4,
"learning_rate": 1.4907175520039381e-05,
"loss": 0.9664,
"step": 220
},
{
"epoch": 0.41,
"learning_rate": 1.4625382902408356e-05,
"loss": 0.9566,
"step": 225
},
{
"epoch": 0.42,
"learning_rate": 1.4338837391175582e-05,
"loss": 0.9733,
"step": 230
},
{
"epoch": 0.43,
"learning_rate": 1.4047833431223938e-05,
"loss": 0.9753,
"step": 235
},
{
"epoch": 0.44,
"learning_rate": 1.3752670048793744e-05,
"loss": 0.9666,
"step": 240
},
{
"epoch": 0.45,
"learning_rate": 1.3453650544213078e-05,
"loss": 0.9485,
"step": 245
},
{
"epoch": 0.46,
"learning_rate": 1.315108218023621e-05,
"loss": 0.9666,
"step": 250
},
{
"epoch": 0.47,
"learning_rate": 1.2845275866310325e-05,
"loss": 0.9539,
"step": 255
},
{
"epoch": 0.48,
"learning_rate": 1.2536545839095074e-05,
"loss": 0.9516,
"step": 260
},
{
"epoch": 0.49,
"learning_rate": 1.2225209339563144e-05,
"loss": 0.9611,
"step": 265
},
{
"epoch": 0.49,
"learning_rate": 1.1911586287013726e-05,
"loss": 0.964,
"step": 270
},
{
"epoch": 0.5,
"learning_rate": 1.1595998950333794e-05,
"loss": 0.9584,
"step": 275
},
{
"epoch": 0.51,
"learning_rate": 1.1278771616845061e-05,
"loss": 0.9472,
"step": 280
},
{
"epoch": 0.52,
"learning_rate": 1.0960230259076819e-05,
"loss": 0.9521,
"step": 285
},
{
"epoch": 0.53,
"learning_rate": 1.064070219980713e-05,
"loss": 0.9426,
"step": 290
},
{
"epoch": 0.54,
"learning_rate": 1.0320515775716556e-05,
"loss": 0.9434,
"step": 295
},
{
"epoch": 0.55,
"learning_rate": 1e-05,
"loss": 0.9592,
"step": 300
},
{
"epoch": 0.56,
"learning_rate": 9.67948422428345e-06,
"loss": 0.9556,
"step": 305
},
{
"epoch": 0.57,
"learning_rate": 9.359297800192873e-06,
"loss": 0.9445,
"step": 310
},
{
"epoch": 0.58,
"learning_rate": 9.039769740923183e-06,
"loss": 0.9506,
"step": 315
},
{
"epoch": 0.59,
"learning_rate": 8.721228383154939e-06,
"loss": 0.9342,
"step": 320
},
{
"epoch": 0.6,
"learning_rate": 8.404001049666211e-06,
"loss": 0.9377,
"step": 325
},
{
"epoch": 0.6,
"learning_rate": 8.08841371298628e-06,
"loss": 0.9539,
"step": 330
},
{
"epoch": 0.61,
"learning_rate": 7.774790660436857e-06,
"loss": 0.9461,
"step": 335
},
{
"epoch": 0.62,
"learning_rate": 7.463454160904928e-06,
"loss": 0.9457,
"step": 340
},
{
"epoch": 0.63,
"learning_rate": 7.154724133689677e-06,
"loss": 0.9402,
"step": 345
},
{
"epoch": 0.64,
"learning_rate": 6.848917819763794e-06,
"loss": 0.9472,
"step": 350
},
{
"epoch": 0.65,
"learning_rate": 6.546349455786926e-06,
"loss": 0.9309,
"step": 355
},
{
"epoch": 0.66,
"learning_rate": 6.24732995120626e-06,
"loss": 0.9274,
"step": 360
},
{
"epoch": 0.67,
"learning_rate": 5.952166568776062e-06,
"loss": 0.9432,
"step": 365
},
{
"epoch": 0.68,
"learning_rate": 5.66116260882442e-06,
"loss": 0.9349,
"step": 370
},
{
"epoch": 0.69,
"learning_rate": 5.37461709759165e-06,
"loss": 0.9517,
"step": 375
},
{
"epoch": 0.7,
"learning_rate": 5.092824479960625e-06,
"loss": 0.9277,
"step": 380
},
{
"epoch": 0.71,
"learning_rate": 4.81607431689475e-06,
"loss": 0.9399,
"step": 385
},
{
"epoch": 0.71,
"learning_rate": 4.544650987894514e-06,
"loss": 0.9413,
"step": 390
},
{
"epoch": 0.72,
"learning_rate": 4.278833398778306e-06,
"loss": 0.9442,
"step": 395
},
{
"epoch": 0.73,
"learning_rate": 4.01889469508784e-06,
"loss": 0.9457,
"step": 400
},
{
"epoch": 0.74,
"learning_rate": 3.7651019814126656e-06,
"loss": 0.929,
"step": 405
},
{
"epoch": 0.75,
"learning_rate": 3.5177160469221184e-06,
"loss": 0.9365,
"step": 410
},
{
"epoch": 0.76,
"learning_rate": 3.2769910973868314e-06,
"loss": 0.9393,
"step": 415
},
{
"epoch": 0.77,
"learning_rate": 3.0431744939651365e-06,
"loss": 0.9292,
"step": 420
},
{
"epoch": 0.78,
"learning_rate": 2.8165064990227255e-06,
"loss": 0.9208,
"step": 425
},
{
"epoch": 0.79,
"learning_rate": 2.597220029246846e-06,
"loss": 0.9307,
"step": 430
},
{
"epoch": 0.8,
"learning_rate": 2.3855404163086558e-06,
"loss": 0.9311,
"step": 435
},
{
"epoch": 0.81,
"learning_rate": 2.1816851753197023e-06,
"loss": 0.9253,
"step": 440
},
{
"epoch": 0.82,
"learning_rate": 1.9858637813204352e-06,
"loss": 0.9221,
"step": 445
},
{
"epoch": 0.82,
"learning_rate": 1.7982774540304404e-06,
"loss": 0.9256,
"step": 450
},
{
"epoch": 0.83,
"learning_rate": 1.6191189510815942e-06,
"loss": 0.9384,
"step": 455
},
{
"epoch": 0.84,
"learning_rate": 1.4485723699465392e-06,
"loss": 0.9402,
"step": 460
},
{
"epoch": 0.85,
"learning_rate": 1.286812958766106e-06,
"loss": 0.9339,
"step": 465
},
{
"epoch": 0.86,
"learning_rate": 1.134006936269999e-06,
"loss": 0.9241,
"step": 470
},
{
"epoch": 0.87,
"learning_rate": 9.903113209758098e-07,
"loss": 0.9247,
"step": 475
},
{
"epoch": 0.88,
"learning_rate": 8.558737698418762e-07,
"loss": 0.9238,
"step": 480
},
{
"epoch": 0.89,
"learning_rate": 7.308324265397837e-07,
"loss": 0.9252,
"step": 485
},
{
"epoch": 0.9,
"learning_rate": 6.153157795023956e-07,
"loss": 0.9266,
"step": 490
},
{
"epoch": 0.91,
"learning_rate": 5.094425298933136e-07,
"loss": 0.931,
"step": 495
},
{
"epoch": 0.92,
"learning_rate": 4.133214696333943e-07,
"loss": 0.9207,
"step": 500
},
{
"epoch": 0.93,
"learning_rate": 3.2705136960970554e-07,
"loss": 0.9194,
"step": 505
},
{
"epoch": 0.93,
"learning_rate": 2.507208781817638e-07,
"loss": 0.9219,
"step": 510
},
{
"epoch": 0.94,
"learning_rate": 1.844084300893456e-07,
"loss": 0.923,
"step": 515
},
{
"epoch": 0.95,
"learning_rate": 1.2818216585549824e-07,
"loss": 0.9317,
"step": 520
},
{
"epoch": 0.96,
"learning_rate": 8.209986176753947e-08,
"loss": 0.9299,
"step": 525
},
{
"epoch": 0.97,
"learning_rate": 4.6208870508017703e-08,
"loss": 0.9231,
"step": 530
},
{
"epoch": 0.98,
"learning_rate": 2.054607249663665e-08,
"loss": 0.9343,
"step": 535
},
{
"epoch": 0.99,
"learning_rate": 5.137837993121064e-09,
"loss": 0.9226,
"step": 540
},
{
"epoch": 1.0,
"learning_rate": 0.0,
"loss": 0.9326,
"step": 545
},
{
"epoch": 1.0,
"eval_loss": 0.934799313545227,
"eval_runtime": 143.5923,
"eval_samples_per_second": 108.007,
"eval_steps_per_second": 1.692,
"step": 545
},
{
"epoch": 1.0,
"step": 545,
"total_flos": 456238269726720.0,
"train_loss": 0.9725383741046311,
"train_runtime": 5277.8235,
"train_samples_per_second": 26.46,
"train_steps_per_second": 0.103
}
],
"logging_steps": 5,
"max_steps": 545,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 55,
"total_flos": 456238269726720.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
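The file above is plain JSON written by the Hugging Face Trainer, so it can be inspected directly without any special tooling. Below is a minimal sketch (assuming the file is saved locally as trainer_state.json and that matplotlib is installed) that filters the per-step entries in log_history and plots the training loss alongside the learning-rate schedule, which in this run warms up linearly to 2e-05 over the first 55 steps and then follows a cosine decay to 0 at step 545. The summary entries at the end of log_history (eval_loss, train_runtime, etc.) are skipped by the filter because they carry no "loss"/"learning_rate" pair.

import json

import matplotlib.pyplot as plt

# Load the trainer state (path assumed: trainer_state.json in the working directory).
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step training entries; the trailing eval/train-summary
# records use different keys (eval_loss, train_loss, ...) and are dropped here.
train_logs = [
    e for e in state["log_history"] if "loss" in e and "learning_rate" in e
]

steps = [e["step"] for e in train_logs]
losses = [e["loss"] for e in train_logs]
lrs = [e["learning_rate"] for e in train_logs]

fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))

# Training loss: drops from ~1.74 at step 1 to ~0.93 by the end of the epoch.
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("global step")
ax_loss.set_ylabel("train loss")
ax_loss.set_title("Training loss")

# Learning rate: linear warmup to 2e-05, then cosine decay to 0 at step 545.
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("global step")
ax_lr.set_ylabel("learning rate")
ax_lr.set_title("LR schedule")

fig.tight_layout()
plt.show()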