{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.4033333333333333,
"eval_steps": 500,
"global_step": 242,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008333333333333333,
"grad_norm": 0.48900601267814636,
"learning_rate": 4.999904807660428e-05,
"loss": 0.4212,
"num_input_tokens_seen": 30064,
"step": 5
},
{
"epoch": 0.016666666666666666,
"grad_norm": 0.3595407009124756,
"learning_rate": 4.9996192378909786e-05,
"loss": 0.2803,
"num_input_tokens_seen": 57376,
"step": 10
},
{
"epoch": 0.025,
"grad_norm": 0.2623545527458191,
"learning_rate": 4.999143312438893e-05,
"loss": 0.1484,
"num_input_tokens_seen": 86160,
"step": 15
},
{
"epoch": 0.03333333333333333,
"grad_norm": 0.2743065357208252,
"learning_rate": 4.99847706754774e-05,
"loss": 0.1151,
"num_input_tokens_seen": 110592,
"step": 20
},
{
"epoch": 0.041666666666666664,
"grad_norm": 0.2651338577270508,
"learning_rate": 4.997620553954645e-05,
"loss": 0.0751,
"num_input_tokens_seen": 138880,
"step": 25
},
{
"epoch": 0.05,
"grad_norm": 0.10798148065805435,
"learning_rate": 4.996573836886435e-05,
"loss": 0.0471,
"num_input_tokens_seen": 169600,
"step": 30
},
{
"epoch": 0.058333333333333334,
"grad_norm": 0.1552838534116745,
"learning_rate": 4.9953369960546676e-05,
"loss": 0.0368,
"num_input_tokens_seen": 201664,
"step": 35
},
{
"epoch": 0.06666666666666667,
"grad_norm": 0.11832074075937271,
"learning_rate": 4.993910125649561e-05,
"loss": 0.0311,
"num_input_tokens_seen": 229936,
"step": 40
},
{
"epoch": 0.075,
"grad_norm": 0.10593917220830917,
"learning_rate": 4.99229333433282e-05,
"loss": 0.0262,
"num_input_tokens_seen": 259088,
"step": 45
},
{
"epoch": 0.08333333333333333,
"grad_norm": 0.09650842100381851,
"learning_rate": 4.990486745229364e-05,
"loss": 0.0244,
"num_input_tokens_seen": 289824,
"step": 50
},
{
"epoch": 0.09166666666666666,
"grad_norm": 0.14155137538909912,
"learning_rate": 4.988490495917947e-05,
"loss": 0.0266,
"num_input_tokens_seen": 317760,
"step": 55
},
{
"epoch": 0.1,
"grad_norm": 0.1490347981452942,
"learning_rate": 4.9867570427929354e-05,
"loss": 0.0283,
"num_input_tokens_seen": 343120,
"step": 60
},
{
"epoch": 0.10833333333333334,
"grad_norm": 0.17539122700691223,
"learning_rate": 4.984419797901491e-05,
"loss": 0.0265,
"num_input_tokens_seen": 371600,
"step": 65
},
{
"epoch": 0.11666666666666667,
"grad_norm": 0.1336919367313385,
"learning_rate": 4.981893354823614e-05,
"loss": 0.0235,
"num_input_tokens_seen": 398720,
"step": 70
},
{
"epoch": 0.125,
"grad_norm": 0.12203177809715271,
"learning_rate": 4.979177905957726e-05,
"loss": 0.0229,
"num_input_tokens_seen": 426416,
"step": 75
},
{
"epoch": 0.13333333333333333,
"grad_norm": 0.2557654082775116,
"learning_rate": 4.976273658095772e-05,
"loss": 0.0238,
"num_input_tokens_seen": 452944,
"step": 80
},
{
"epoch": 0.14166666666666666,
"grad_norm": 0.13327105343341827,
"learning_rate": 4.9731808324074717e-05,
"loss": 0.0208,
"num_input_tokens_seen": 480896,
"step": 85
},
{
"epoch": 0.15,
"grad_norm": 0.1262751668691635,
"learning_rate": 4.969899664423473e-05,
"loss": 0.0191,
"num_input_tokens_seen": 510096,
"step": 90
},
{
"epoch": 0.15833333333333333,
"grad_norm": 0.16084067523479462,
"learning_rate": 4.966430404017424e-05,
"loss": 0.0205,
"num_input_tokens_seen": 536672,
"step": 95
},
{
"epoch": 0.16666666666666666,
"grad_norm": 0.15870437026023865,
"learning_rate": 4.962773315386935e-05,
"loss": 0.0193,
"num_input_tokens_seen": 566528,
"step": 100
},
{
"epoch": 0.175,
"grad_norm": 0.07833187282085419,
"learning_rate": 4.9589286770334654e-05,
"loss": 0.024,
"num_input_tokens_seen": 593584,
"step": 105
},
{
"epoch": 0.18333333333333332,
"grad_norm": 0.09176061302423477,
"learning_rate": 4.954896781741109e-05,
"loss": 0.0206,
"num_input_tokens_seen": 620992,
"step": 110
},
{
"epoch": 0.19166666666666668,
"grad_norm": 0.08285439014434814,
"learning_rate": 4.9506779365543046e-05,
"loss": 0.0188,
"num_input_tokens_seen": 647504,
"step": 115
},
{
"epoch": 0.2,
"grad_norm": 0.13359522819519043,
"learning_rate": 4.9462724627544466e-05,
"loss": 0.0183,
"num_input_tokens_seen": 676336,
"step": 120
},
{
"epoch": 0.20833333333333334,
"grad_norm": 0.07570718228816986,
"learning_rate": 4.94168069583542e-05,
"loss": 0.0189,
"num_input_tokens_seen": 701488,
"step": 125
},
{
"epoch": 0.21666666666666667,
"grad_norm": 0.11286037415266037,
"learning_rate": 4.936902985478055e-05,
"loss": 0.0213,
"num_input_tokens_seen": 731152,
"step": 130
},
{
"epoch": 0.225,
"grad_norm": 0.17183929681777954,
"learning_rate": 4.931939695523492e-05,
"loss": 0.0214,
"num_input_tokens_seen": 757792,
"step": 135
},
{
"epoch": 0.23333333333333334,
"grad_norm": 0.11746834218502045,
"learning_rate": 4.926791203945477e-05,
"loss": 0.0214,
"num_input_tokens_seen": 786064,
"step": 140
},
{
"epoch": 0.24166666666666667,
"grad_norm": 0.09453292936086655,
"learning_rate": 4.9214579028215776e-05,
"loss": 0.0203,
"num_input_tokens_seen": 814640,
"step": 145
},
{
"epoch": 0.25,
"grad_norm": 0.2074723094701767,
"learning_rate": 4.915940198303324e-05,
"loss": 0.0194,
"num_input_tokens_seen": 842656,
"step": 150
},
{
"epoch": 0.25833333333333336,
"grad_norm": 0.15371476113796234,
"learning_rate": 4.910238510585276e-05,
"loss": 0.0158,
"num_input_tokens_seen": 868512,
"step": 155
},
{
"epoch": 0.26666666666666666,
"grad_norm": 0.07177837193012238,
"learning_rate": 4.9043532738730284e-05,
"loss": 0.0172,
"num_input_tokens_seen": 896416,
"step": 160
},
{
"epoch": 0.275,
"grad_norm": 0.07891824096441269,
"learning_rate": 4.898284936350144e-05,
"loss": 0.0193,
"num_input_tokens_seen": 924720,
"step": 165
},
{
"epoch": 0.2833333333333333,
"grad_norm": 0.08302944153547287,
"learning_rate": 4.892033960144019e-05,
"loss": 0.0189,
"num_input_tokens_seen": 955584,
"step": 170
},
{
"epoch": 0.2916666666666667,
"grad_norm": 0.25554358959198,
"learning_rate": 4.8856008212906925e-05,
"loss": 0.019,
"num_input_tokens_seen": 985072,
"step": 175
},
{
"epoch": 0.3,
"grad_norm": 0.1445908546447754,
"learning_rate": 4.878986009698596e-05,
"loss": 0.0168,
"num_input_tokens_seen": 1013296,
"step": 180
},
{
"epoch": 0.30833333333333335,
"grad_norm": 0.21417762339115143,
"learning_rate": 4.8721900291112415e-05,
"loss": 0.0194,
"num_input_tokens_seen": 1043664,
"step": 185
},
{
"epoch": 0.31666666666666665,
"grad_norm": 0.1550753116607666,
"learning_rate": 4.8652133970688636e-05,
"loss": 0.0155,
"num_input_tokens_seen": 1070912,
"step": 190
},
{
"epoch": 0.325,
"grad_norm": 0.132898211479187,
"learning_rate": 4.858056644869002e-05,
"loss": 0.0193,
"num_input_tokens_seen": 1097856,
"step": 195
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.09164389222860336,
"learning_rate": 4.850720317526047e-05,
"loss": 0.0164,
"num_input_tokens_seen": 1126128,
"step": 200
},
{
"epoch": 0.3416666666666667,
"grad_norm": 0.07462836056947708,
"learning_rate": 4.843204973729729e-05,
"loss": 0.014,
"num_input_tokens_seen": 1157584,
"step": 205
},
{
"epoch": 0.35,
"grad_norm": 0.08473635464906693,
"learning_rate": 4.835511185802574e-05,
"loss": 0.0154,
"num_input_tokens_seen": 1184800,
"step": 210
},
{
"epoch": 0.35833333333333334,
"grad_norm": 0.07371218502521515,
"learning_rate": 4.827639539656321e-05,
"loss": 0.0147,
"num_input_tokens_seen": 1213104,
"step": 215
},
{
"epoch": 0.36666666666666664,
"grad_norm": 0.1394609957933426,
"learning_rate": 4.8195906347473e-05,
"loss": 0.0185,
"num_input_tokens_seen": 1241808,
"step": 220
},
{
"epoch": 0.375,
"grad_norm": 0.11585802584886551,
"learning_rate": 4.8113650840307834e-05,
"loss": 0.016,
"num_input_tokens_seen": 1270160,
"step": 225
},
{
"epoch": 0.38333333333333336,
"grad_norm": 0.1538001149892807,
"learning_rate": 4.802963513914304e-05,
"loss": 0.0168,
"num_input_tokens_seen": 1297456,
"step": 230
},
{
"epoch": 0.39166666666666666,
"grad_norm": 0.09096246212720871,
"learning_rate": 4.794386564209953e-05,
"loss": 0.0154,
"num_input_tokens_seen": 1323392,
"step": 235
},
{
"epoch": 0.4,
"grad_norm": 0.11348401010036469,
"learning_rate": 4.7856348880856595e-05,
"loss": 0.0149,
"num_input_tokens_seen": 1358576,
"step": 240
},
{
"epoch": 0.4033333333333333,
"num_input_tokens_seen": 1371584,
"step": 242,
"total_flos": 4652852538507264.0,
"train_loss": 0.040362141964849364,
"train_runtime": 4583.2214,
"train_samples_per_second": 6.284,
"train_steps_per_second": 0.393
}
],
"logging_steps": 5,
"max_steps": 1800,
"num_input_tokens_seen": 1371584,
"num_train_epochs": 3,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4652852538507264.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}