{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 270,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.003703703703703704,
"grad_norm": 0.6600888967514038,
"learning_rate": 1.111111111111111e-05,
"loss": 1.6547,
"step": 1
},
{
"epoch": 0.018518518518518517,
"grad_norm": 1.0668487548828125,
"learning_rate": 5.5555555555555545e-05,
"loss": 1.6004,
"step": 5
},
{
"epoch": 0.037037037037037035,
"grad_norm": 0.8215888142585754,
"learning_rate": 0.00011111111111111109,
"loss": 1.5361,
"step": 10
},
{
"epoch": 0.05555555555555555,
"grad_norm": 1.627755045890808,
"learning_rate": 0.00016666666666666666,
"loss": 1.3368,
"step": 15
},
{
"epoch": 0.07407407407407407,
"grad_norm": 1.0417497158050537,
"learning_rate": 0.00022222222222222218,
"loss": 0.9318,
"step": 20
},
{
"epoch": 0.09259259259259259,
"grad_norm": 0.45057186484336853,
"learning_rate": 0.0002777777777777778,
"loss": 0.7995,
"step": 25
},
{
"epoch": 0.1111111111111111,
"grad_norm": 0.3393832743167877,
"learning_rate": 0.0002998871928756345,
"loss": 0.7298,
"step": 30
},
{
"epoch": 0.12962962962962962,
"grad_norm": 0.2655043601989746,
"learning_rate": 0.0002991984303609902,
"loss": 0.6803,
"step": 35
},
{
"epoch": 0.14814814814814814,
"grad_norm": 0.1693999469280243,
"learning_rate": 0.0002978864495017194,
"loss": 0.65,
"step": 40
},
{
"epoch": 0.16666666666666666,
"grad_norm": 0.1550581008195877,
"learning_rate": 0.00029595673058697357,
"loss": 0.6344,
"step": 45
},
{
"epoch": 0.18518518518518517,
"grad_norm": 0.13547781109809875,
"learning_rate": 0.0002934173342660819,
"loss": 0.6189,
"step": 50
},
{
"epoch": 0.2037037037037037,
"grad_norm": 0.12479308992624283,
"learning_rate": 0.00029027886787832844,
"loss": 0.6105,
"step": 55
},
{
"epoch": 0.2222222222222222,
"grad_norm": 0.14283517003059387,
"learning_rate": 0.000286554441144922,
"loss": 0.6005,
"step": 60
},
{
"epoch": 0.24074074074074073,
"grad_norm": 0.15204459428787231,
"learning_rate": 0.0002822596114082412,
"loss": 0.6063,
"step": 65
},
{
"epoch": 0.25925925925925924,
"grad_norm": 0.1366046667098999,
"learning_rate": 0.0002774123186470946,
"loss": 0.5926,
"step": 70
},
{
"epoch": 0.2777777777777778,
"grad_norm": 0.13155071437358856,
"learning_rate": 0.0002720328105394451,
"loss": 0.5877,
"step": 75
},
{
"epoch": 0.2962962962962963,
"grad_norm": 0.14542943239212036,
"learning_rate": 0.00026614355788561985,
"loss": 0.5688,
"step": 80
},
{
"epoch": 0.3148148148148148,
"grad_norm": 0.14441293478012085,
"learning_rate": 0.00025976916074529183,
"loss": 0.5706,
"step": 85
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.13577958941459656,
"learning_rate": 0.00025293624568031,
"loss": 0.5717,
"step": 90
},
{
"epoch": 0.35185185185185186,
"grad_norm": 0.16319440305233002,
"learning_rate": 0.0002456733545326059,
"loss": 0.5709,
"step": 95
},
{
"epoch": 0.37037037037037035,
"grad_norm": 0.13978739082813263,
"learning_rate": 0.00023801082520176267,
"loss": 0.5565,
"step": 100
},
{
"epoch": 0.3888888888888889,
"grad_norm": 0.16405534744262695,
"learning_rate": 0.0002299806649202537,
"loss": 0.5682,
"step": 105
},
{
"epoch": 0.4074074074074074,
"grad_norm": 0.14882802963256836,
"learning_rate": 0.00022161641655569234,
"loss": 0.5525,
"step": 110
},
{
"epoch": 0.42592592592592593,
"grad_norm": 0.15766838192939758,
"learning_rate": 0.00021295301849856435,
"loss": 0.5525,
"step": 115
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.16089104115962982,
"learning_rate": 0.00020402665872070654,
"loss": 0.5477,
"step": 120
},
{
"epoch": 0.46296296296296297,
"grad_norm": 0.1631491482257843,
"learning_rate": 0.00019487462361414626,
"loss": 0.5503,
"step": 125
},
{
"epoch": 0.48148148148148145,
"grad_norm": 0.15596701204776764,
"learning_rate": 0.00018553514224171783,
"loss": 0.5469,
"step": 130
},
{
"epoch": 0.5,
"grad_norm": 0.1614404171705246,
"learning_rate": 0.00017604722665003956,
"loss": 0.5532,
"step": 135
},
{
"epoch": 0.5185185185185185,
"grad_norm": 0.15226240456104279,
"learning_rate": 0.00016645050891187974,
"loss": 0.5432,
"step": 140
},
{
"epoch": 0.5370370370370371,
"grad_norm": 0.16388216614723206,
"learning_rate": 0.00015678507557860595,
"loss": 0.542,
"step": 145
},
{
"epoch": 0.5555555555555556,
"grad_norm": 0.15366673469543457,
"learning_rate": 0.00014709130023422633,
"loss": 0.5475,
"step": 150
},
{
"epoch": 0.5740740740740741,
"grad_norm": 0.16850697994232178,
"learning_rate": 0.00013740967485046393,
"loss": 0.5516,
"step": 155
},
{
"epoch": 0.5925925925925926,
"grad_norm": 0.16164420545101166,
"learning_rate": 0.0001277806406473127,
"loss": 0.5372,
"step": 160
},
{
"epoch": 0.6111111111111112,
"grad_norm": 0.16969184577465057,
"learning_rate": 0.00011824441916558842,
"loss": 0.5324,
"step": 165
},
{
"epoch": 0.6296296296296297,
"grad_norm": 0.17503169178962708,
"learning_rate": 0.00010884084425710479,
"loss": 0.5372,
"step": 170
},
{
"epoch": 0.6481481481481481,
"grad_norm": 0.17154920101165771,
"learning_rate": 9.960919569426869e-05,
"loss": 0.5339,
"step": 175
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.16324155032634735,
"learning_rate": 9.058803509412646e-05,
"loss": 0.5415,
"step": 180
},
{
"epoch": 0.6851851851851852,
"grad_norm": 0.1716451346874237,
"learning_rate": 8.18150448422249e-05,
"loss": 0.5329,
"step": 185
},
{
"epoch": 0.7037037037037037,
"grad_norm": 0.15859860181808472,
"learning_rate": 7.332687068911903e-05,
"loss": 0.5368,
"step": 190
},
{
"epoch": 0.7222222222222222,
"grad_norm": 0.16680656373500824,
"learning_rate": 6.515896867701923e-05,
"loss": 0.538,
"step": 195
},
{
"epoch": 0.7407407407407407,
"grad_norm": 0.1692035049200058,
"learning_rate": 5.734545703598145e-05,
"loss": 0.5323,
"step": 200
},
{
"epoch": 0.7592592592592593,
"grad_norm": 0.1742638200521469,
"learning_rate": 4.991897366828704e-05,
"loss": 0.5375,
"step": 205
},
{
"epoch": 0.7777777777777778,
"grad_norm": 0.1706262081861496,
"learning_rate": 4.2910539816315164e-05,
"loss": 0.5286,
"step": 210
},
{
"epoch": 0.7962962962962963,
"grad_norm": 0.15826331079006195,
"learning_rate": 3.6349430483382306e-05,
"loss": 0.5273,
"step": 215
},
{
"epoch": 0.8148148148148148,
"grad_norm": 0.15241724252700806,
"learning_rate": 3.0263052148816046e-05,
"loss": 0.5285,
"step": 220
},
{
"epoch": 0.8333333333333334,
"grad_norm": 0.1507098227739334,
"learning_rate": 2.4676828288059558e-05,
"loss": 0.5356,
"step": 225
},
{
"epoch": 0.8518518518518519,
"grad_norm": 0.15940095484256744,
"learning_rate": 1.9614093176002828e-05,
"loss": 0.5417,
"step": 230
},
{
"epoch": 0.8703703703703703,
"grad_norm": 0.16012977063655853,
"learning_rate": 1.5095994417136053e-05,
"loss": 0.5323,
"step": 235
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.1607007086277008,
"learning_rate": 1.1141404609666449e-05,
"loss": 0.5234,
"step": 240
},
{
"epoch": 0.9074074074074074,
"grad_norm": 0.15569806098937988,
"learning_rate": 7.766842512588529e-06,
"loss": 0.5293,
"step": 245
},
{
"epoch": 0.9259259259259259,
"grad_norm": 0.15847626328468323,
"learning_rate": 4.986404045000697e-06,
"loss": 0.5286,
"step": 250
},
{
"epoch": 0.9444444444444444,
"grad_norm": 0.16243720054626465,
"learning_rate": 2.811703405892296e-06,
"loss": 0.525,
"step": 255
},
{
"epoch": 0.9629629629629629,
"grad_norm": 0.1597120612859726,
"learning_rate": 1.2518245603498345e-06,
"loss": 0.5295,
"step": 260
},
{
"epoch": 0.9814814814814815,
"grad_norm": 0.15212252736091614,
"learning_rate": 3.1328329483019663e-07,
"loss": 0.5356,
"step": 265
},
{
"epoch": 1.0,
"grad_norm": 0.15848107635974884,
"learning_rate": 0.0,
"loss": 0.5385,
"step": 270
},
{
"epoch": 1.0,
"eval_loss": 1.2769262790679932,
"eval_runtime": 1.0983,
"eval_samples_per_second": 3.642,
"eval_steps_per_second": 0.911,
"step": 270
},
{
"epoch": 1.0,
"step": 270,
"total_flos": 7.971478934745252e+17,
"train_loss": 0.6237873245168615,
"train_runtime": 2852.5229,
"train_samples_per_second": 6.048,
"train_steps_per_second": 0.095
}
],
"logging_steps": 5,
"max_steps": 270,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.971478934745252e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}