{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.035623409669211,
"eval_steps": 500,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05089058524173028,
"grad_norm": 2.2103662490844727,
"learning_rate": 0.0001999771640780308,
"loss": 3.0938,
"num_input_tokens_seen": 190176,
"step": 5
},
{
"epoch": 0.10178117048346055,
"grad_norm": 0.8035047054290771,
"learning_rate": 0.0001998844110196681,
"loss": 1.471,
"num_input_tokens_seen": 378656,
"step": 10
},
{
"epoch": 0.15267175572519084,
"grad_norm": 0.44624239206314087,
"learning_rate": 0.00019972037971811802,
"loss": 1.2452,
"num_input_tokens_seen": 570208,
"step": 15
},
{
"epoch": 0.2035623409669211,
"grad_norm": 0.41621240973472595,
"learning_rate": 0.00019948518722731206,
"loss": 1.1957,
"num_input_tokens_seen": 763040,
"step": 20
},
{
"epoch": 0.2544529262086514,
"grad_norm": 0.3875686824321747,
"learning_rate": 0.0001991790013823246,
"loss": 1.1319,
"num_input_tokens_seen": 959200,
"step": 25
},
{
"epoch": 0.3053435114503817,
"grad_norm": 0.4382984936237335,
"learning_rate": 0.00019880204067960472,
"loss": 1.1209,
"num_input_tokens_seen": 1153728,
"step": 30
},
{
"epoch": 0.356234096692112,
"grad_norm": 0.45683977007865906,
"learning_rate": 0.00019835457412105528,
"loss": 1.123,
"num_input_tokens_seen": 1351616,
"step": 35
},
{
"epoch": 0.4071246819338422,
"grad_norm": 0.4628298580646515,
"learning_rate": 0.00019783692102207155,
"loss": 1.0532,
"num_input_tokens_seen": 1537696,
"step": 40
},
{
"epoch": 0.4580152671755725,
"grad_norm": 0.5620412826538086,
"learning_rate": 0.00019724945078367513,
"loss": 1.0805,
"num_input_tokens_seen": 1730144,
"step": 45
},
{
"epoch": 0.5089058524173028,
"grad_norm": 0.4734291732311249,
"learning_rate": 0.00019659258262890683,
"loss": 1.0572,
"num_input_tokens_seen": 1916608,
"step": 50
},
{
"epoch": 0.5597964376590331,
"grad_norm": 0.46405333280563354,
"learning_rate": 0.00019586678530366606,
"loss": 1.0069,
"num_input_tokens_seen": 2103712,
"step": 55
},
{
"epoch": 0.6106870229007634,
"grad_norm": 0.4856356084346771,
"learning_rate": 0.00019507257674221027,
"loss": 1.0293,
"num_input_tokens_seen": 2289888,
"step": 60
},
{
"epoch": 0.6615776081424937,
"grad_norm": 0.4761466085910797,
"learning_rate": 0.00019421052369755334,
"loss": 0.9994,
"num_input_tokens_seen": 2477888,
"step": 65
},
{
"epoch": 0.712468193384224,
"grad_norm": 0.5239170789718628,
"learning_rate": 0.0001932812413370265,
"loss": 1.0159,
"num_input_tokens_seen": 2662848,
"step": 70
},
{
"epoch": 0.7633587786259542,
"grad_norm": 0.4161457419395447,
"learning_rate": 0.00019228539280329038,
"loss": 1.0233,
"num_input_tokens_seen": 2861024,
"step": 75
},
{
"epoch": 0.8142493638676844,
"grad_norm": 0.5579718351364136,
"learning_rate": 0.00019122368874111172,
"loss": 1.0109,
"num_input_tokens_seen": 3043712,
"step": 80
},
{
"epoch": 0.8651399491094147,
"grad_norm": 0.4595694839954376,
"learning_rate": 0.0001900968867902419,
"loss": 1.0132,
"num_input_tokens_seen": 3231840,
"step": 85
},
{
"epoch": 0.916030534351145,
"grad_norm": 0.49271535873413086,
"learning_rate": 0.00018890579104475995,
"loss": 1.0079,
"num_input_tokens_seen": 3429632,
"step": 90
},
{
"epoch": 0.9669211195928753,
"grad_norm": 0.5259862542152405,
"learning_rate": 0.00018765125147926476,
"loss": 0.9515,
"num_input_tokens_seen": 3620000,
"step": 95
},
{
"epoch": 1.0178117048346056,
"grad_norm": 0.444477379322052,
"learning_rate": 0.00018633416334232753,
"loss": 0.93,
"num_input_tokens_seen": 3809824,
"step": 100
},
{
"epoch": 1.0687022900763359,
"grad_norm": 0.42680156230926514,
"learning_rate": 0.0001849554665176354,
"loss": 0.8925,
"num_input_tokens_seen": 4005472,
"step": 105
},
{
"epoch": 1.1195928753180662,
"grad_norm": 0.6088462471961975,
"learning_rate": 0.00018351614485328388,
"loss": 0.8715,
"num_input_tokens_seen": 4192480,
"step": 110
},
{
"epoch": 1.1704834605597965,
"grad_norm": 0.5572230815887451,
"learning_rate": 0.0001820172254596956,
"loss": 0.8569,
"num_input_tokens_seen": 4379296,
"step": 115
},
{
"epoch": 1.2213740458015268,
"grad_norm": 0.5509177446365356,
"learning_rate": 0.00018045977797666684,
"loss": 0.8269,
"num_input_tokens_seen": 4568352,
"step": 120
},
{
"epoch": 1.272264631043257,
"grad_norm": 0.67293781042099,
"learning_rate": 0.00017884491381006478,
"loss": 0.84,
"num_input_tokens_seen": 4752928,
"step": 125
},
{
"epoch": 1.3231552162849873,
"grad_norm": 0.5845524668693542,
"learning_rate": 0.00017717378533872017,
"loss": 0.8243,
"num_input_tokens_seen": 4939008,
"step": 130
},
{
"epoch": 1.3740458015267176,
"grad_norm": 0.5623295307159424,
"learning_rate": 0.00017544758509208146,
"loss": 0.8624,
"num_input_tokens_seen": 5133664,
"step": 135
},
{
"epoch": 1.424936386768448,
"grad_norm": 0.5870885848999023,
"learning_rate": 0.00017366754489921694,
"loss": 0.8543,
"num_input_tokens_seen": 5332000,
"step": 140
},
{
"epoch": 1.4758269720101782,
"grad_norm": 0.5874502062797546,
"learning_rate": 0.00017183493500977278,
"loss": 0.842,
"num_input_tokens_seen": 5520640,
"step": 145
},
{
"epoch": 1.5267175572519083,
"grad_norm": 0.5786451101303101,
"learning_rate": 0.0001699510631875134,
"loss": 0.8268,
"num_input_tokens_seen": 5710528,
"step": 150
},
{
"epoch": 1.5776081424936388,
"grad_norm": 0.5929097533226013,
"learning_rate": 0.00016801727377709194,
"loss": 0.8238,
"num_input_tokens_seen": 5907264,
"step": 155
},
{
"epoch": 1.6284987277353689,
"grad_norm": 0.570965051651001,
"learning_rate": 0.00016603494674471593,
"loss": 0.8324,
"num_input_tokens_seen": 6095776,
"step": 160
},
{
"epoch": 1.6793893129770994,
"grad_norm": 0.6001121401786804,
"learning_rate": 0.0001640054966933935,
"loss": 0.8431,
"num_input_tokens_seen": 6281280,
"step": 165
},
{
"epoch": 1.7302798982188294,
"grad_norm": 0.5865895748138428,
"learning_rate": 0.00016193037185346224,
"loss": 0.8054,
"num_input_tokens_seen": 6470368,
"step": 170
},
{
"epoch": 1.78117048346056,
"grad_norm": 0.6706877946853638,
"learning_rate": 0.00015981105304912162,
"loss": 0.8255,
"num_input_tokens_seen": 6658944,
"step": 175
},
{
"epoch": 1.83206106870229,
"grad_norm": 0.6549966931343079,
"learning_rate": 0.0001576490526417059,
"loss": 0.8334,
"num_input_tokens_seen": 6842880,
"step": 180
},
{
"epoch": 1.8829516539440203,
"grad_norm": 0.6813002228736877,
"learning_rate": 0.0001554459134504523,
"loss": 0.8354,
"num_input_tokens_seen": 7034432,
"step": 185
},
{
"epoch": 1.9338422391857506,
"grad_norm": 0.6717746257781982,
"learning_rate": 0.00015320320765153367,
"loss": 0.8294,
"num_input_tokens_seen": 7228672,
"step": 190
},
{
"epoch": 1.984732824427481,
"grad_norm": 0.6509274840354919,
"learning_rate": 0.00015092253565614233,
"loss": 0.8561,
"num_input_tokens_seen": 7414144,
"step": 195
},
{
"epoch": 2.035623409669211,
"grad_norm": 0.6088241338729858,
"learning_rate": 0.00014860552496842494,
"loss": 0.6968,
"num_input_tokens_seen": 7603104,
"step": 200
}
],
"logging_steps": 5,
"max_steps": 588,
"num_input_tokens_seen": 7603104,
"num_train_epochs": 6,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.433210283043062e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}