{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 256,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00390625,
"grad_norm": 1.1706680059432983,
"learning_rate": 1.1538461538461538e-05,
"loss": 1.9646,
"step": 1
},
{
"epoch": 0.01953125,
"grad_norm": 1.8140126466751099,
"learning_rate": 5.769230769230769e-05,
"loss": 1.9391,
"step": 5
},
{
"epoch": 0.0390625,
"grad_norm": 0.9559171795845032,
"learning_rate": 0.00011538461538461538,
"loss": 1.8731,
"step": 10
},
{
"epoch": 0.05859375,
"grad_norm": 1.280180811882019,
"learning_rate": 0.00017307692307692304,
"loss": 1.663,
"step": 15
},
{
"epoch": 0.078125,
"grad_norm": 1.2522212266921997,
"learning_rate": 0.00023076923076923076,
"loss": 1.2646,
"step": 20
},
{
"epoch": 0.09765625,
"grad_norm": 0.3725765645503998,
"learning_rate": 0.00028846153846153843,
"loss": 1.1492,
"step": 25
},
{
"epoch": 0.1171875,
"grad_norm": 0.4075080156326294,
"learning_rate": 0.00029977617052242417,
"loss": 1.0681,
"step": 30
},
{
"epoch": 0.13671875,
"grad_norm": 0.572589635848999,
"learning_rate": 0.0002988680080036802,
"loss": 1.0165,
"step": 35
},
{
"epoch": 0.15625,
"grad_norm": 0.30389469861984253,
"learning_rate": 0.00029726575411133377,
"loss": 0.9663,
"step": 40
},
{
"epoch": 0.17578125,
"grad_norm": 0.24198134243488312,
"learning_rate": 0.0002949768792926617,
"loss": 0.9329,
"step": 45
},
{
"epoch": 0.1953125,
"grad_norm": 0.19795848429203033,
"learning_rate": 0.00029201205533865653,
"loss": 0.9227,
"step": 50
},
{
"epoch": 0.21484375,
"grad_norm": 0.19215987622737885,
"learning_rate": 0.00028838510562721075,
"loss": 0.9304,
"step": 55
},
{
"epoch": 0.234375,
"grad_norm": 0.18681851029396057,
"learning_rate": 0.00028411294067214764,
"loss": 0.9045,
"step": 60
},
{
"epoch": 0.25390625,
"grad_norm": 0.17366646230220795,
"learning_rate": 0.00027921547927859996,
"loss": 0.8884,
"step": 65
},
{
"epoch": 0.2734375,
"grad_norm": 0.1574220508337021,
"learning_rate": 0.0002737155556723452,
"loss": 0.8777,
"step": 70
},
{
"epoch": 0.29296875,
"grad_norm": 0.17448483407497406,
"learning_rate": 0.0002676388130361047,
"loss": 0.8948,
"step": 75
},
{
"epoch": 0.3125,
"grad_norm": 0.15831591188907623,
"learning_rate": 0.00026101358394918777,
"loss": 0.8762,
"step": 80
},
{
"epoch": 0.33203125,
"grad_norm": 0.1556362807750702,
"learning_rate": 0.0002538707582879288,
"loss": 0.8668,
"step": 85
},
{
"epoch": 0.3515625,
"grad_norm": 0.1763574779033661,
"learning_rate": 0.00024624363920282413,
"loss": 0.8855,
"step": 90
},
{
"epoch": 0.37109375,
"grad_norm": 0.18880251049995422,
"learning_rate": 0.00023816778784387094,
"loss": 0.8553,
"step": 95
},
{
"epoch": 0.390625,
"grad_norm": 0.1806374490261078,
"learning_rate": 0.0002296808575580705,
"loss": 0.8632,
"step": 100
},
{
"epoch": 0.41015625,
"grad_norm": 0.15463082492351532,
"learning_rate": 0.0002208224183321428,
"loss": 0.8585,
"step": 105
},
{
"epoch": 0.4296875,
"grad_norm": 0.16831685602664948,
"learning_rate": 0.00021163377229898225,
"loss": 0.8478,
"step": 110
},
{
"epoch": 0.44921875,
"grad_norm": 0.18693634867668152,
"learning_rate": 0.00020215776116804833,
"loss": 0.8508,
"step": 115
},
{
"epoch": 0.46875,
"grad_norm": 0.17928317189216614,
"learning_rate": 0.00019243856647753948,
"loss": 0.8551,
"step": 120
},
{
"epoch": 0.48828125,
"grad_norm": 0.16863255202770233,
"learning_rate": 0.00018252150359966712,
"loss": 0.8521,
"step": 125
},
{
"epoch": 0.5078125,
"grad_norm": 0.16852350533008575,
"learning_rate": 0.00017245281045947164,
"loss": 0.849,
"step": 130
},
{
"epoch": 0.52734375,
"grad_norm": 0.182328999042511,
"learning_rate": 0.00016227943195227197,
"loss": 0.8521,
"step": 135
},
{
"epoch": 0.546875,
"grad_norm": 0.18237030506134033,
"learning_rate": 0.00015204880106489262,
"loss": 0.8487,
"step": 140
},
{
"epoch": 0.56640625,
"grad_norm": 0.20504002273082733,
"learning_rate": 0.0001418086177211835,
"loss": 0.8422,
"step": 145
},
{
"epoch": 0.5859375,
"grad_norm": 0.21228522062301636,
"learning_rate": 0.00013160662638295526,
"loss": 0.8317,
"step": 150
},
{
"epoch": 0.60546875,
"grad_norm": 0.20387493073940277,
"learning_rate": 0.00012149039344325893,
"loss": 0.8359,
"step": 155
},
{
"epoch": 0.625,
"grad_norm": 0.22508500516414642,
"learning_rate": 0.00011150708544990398,
"loss": 0.8432,
"step": 160
},
{
"epoch": 0.64453125,
"grad_norm": 0.1838555485010147,
"learning_rate": 0.00010170324919323928,
"loss": 0.8395,
"step": 165
},
{
"epoch": 0.6640625,
"grad_norm": 0.17921607196331024,
"learning_rate": 9.212459468352966e-05,
"loss": 0.8267,
"step": 170
},
{
"epoch": 0.68359375,
"grad_norm": 0.1911832094192505,
"learning_rate": 8.281578202978773e-05,
"loss": 0.8271,
"step": 175
},
{
"epoch": 0.703125,
"grad_norm": 0.17375214397907257,
"learning_rate": 7.382021321372908e-05,
"loss": 0.833,
"step": 180
},
{
"epoch": 0.72265625,
"grad_norm": 0.204771026968956,
"learning_rate": 6.517982972969911e-05,
"loss": 0.8364,
"step": 185
},
{
"epoch": 0.7421875,
"grad_norm": 0.20459486544132233,
"learning_rate": 5.693491703406478e-05,
"loss": 0.8256,
"step": 190
},
{
"epoch": 0.76171875,
"grad_norm": 0.1818506121635437,
"learning_rate": 4.912391671582092e-05,
"loss": 0.8423,
"step": 195
},
{
"epoch": 0.78125,
"grad_norm": 0.1734897941350937,
"learning_rate": 4.178324726415664e-05,
"loss": 0.8291,
"step": 200
},
{
"epoch": 0.80078125,
"grad_norm": 0.18124301731586456,
"learning_rate": 3.494713426864761e-05,
"loss": 0.8296,
"step": 205
},
{
"epoch": 0.8203125,
"grad_norm": 0.1811358779668808,
"learning_rate": 2.8647450843757897e-05,
"loss": 0.8402,
"step": 210
},
{
"epoch": 0.83984375,
"grad_norm": 0.20237997174263,
"learning_rate": 2.291356902166746e-05,
"loss": 0.8219,
"step": 215
},
{
"epoch": 0.859375,
"grad_norm": 0.17338159680366516,
"learning_rate": 1.7772222806299264e-05,
"loss": 0.8263,
"step": 220
},
{
"epoch": 0.87890625,
"grad_norm": 0.17720431089401245,
"learning_rate": 1.3247383527051985e-05,
"loss": 0.8302,
"step": 225
},
{
"epoch": 0.8984375,
"grad_norm": 0.1745912879705429,
"learning_rate": 9.360148073396962e-06,
"loss": 0.8252,
"step": 230
},
{
"epoch": 0.91796875,
"grad_norm": 0.1862880140542984,
"learning_rate": 6.128640531440515e-06,
"loss": 0.8411,
"step": 235
},
{
"epoch": 0.9375,
"grad_norm": 0.17852172255516052,
"learning_rate": 3.5679276810683167e-06,
"loss": 0.8201,
"step": 240
},
{
"epoch": 0.95703125,
"grad_norm": 0.19089260697364807,
"learning_rate": 1.6899487476622898e-06,
"loss": 0.8322,
"step": 245
},
{
"epoch": 0.9765625,
"grad_norm": 0.1653287410736084,
"learning_rate": 5.034597359205639e-07,
"loss": 0.832,
"step": 250
},
{
"epoch": 0.99609375,
"grad_norm": 0.17792922258377075,
"learning_rate": 1.3992605321688776e-08,
"loss": 0.8284,
"step": 255
},
{
"epoch": 1.0,
"step": 256,
"total_flos": 7.558147382936863e+17,
"train_loss": 0.0,
"train_runtime": 1.5255,
"train_samples_per_second": 10721.267,
"train_steps_per_second": 167.817
}
],
"logging_steps": 5,
"max_steps": 256,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.558147382936863e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}