{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.5886869425495271,
"eval_steps": 500,
"global_step": 478,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 1.999992497128677e-05,
"loss": 1.9904,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 1.9998124338461647e-05,
"loss": 1.3616,
"step": 5
},
{
"epoch": 0.01,
"learning_rate": 1.9992498057467824e-05,
"loss": 1.2418,
"step": 10
},
{
"epoch": 0.02,
"learning_rate": 1.9983123267618306e-05,
"loss": 1.2313,
"step": 15
},
{
"epoch": 0.02,
"learning_rate": 1.997000348569964e-05,
"loss": 1.2068,
"step": 20
},
{
"epoch": 0.03,
"learning_rate": 1.99531436333659e-05,
"loss": 1.2108,
"step": 25
},
{
"epoch": 0.04,
"learning_rate": 1.9932550035292393e-05,
"loss": 1.2029,
"step": 30
},
{
"epoch": 0.04,
"learning_rate": 1.9908230416803093e-05,
"loss": 1.2202,
"step": 35
},
{
"epoch": 0.05,
"learning_rate": 1.98801939009726e-05,
"loss": 1.1875,
"step": 40
},
{
"epoch": 0.06,
"learning_rate": 1.9848451005203795e-05,
"loss": 1.2031,
"step": 45
},
{
"epoch": 0.06,
"learning_rate": 1.9813013637282426e-05,
"loss": 1.1959,
"step": 50
},
{
"epoch": 0.07,
"learning_rate": 1.9773895090910098e-05,
"loss": 1.1819,
"step": 55
},
{
"epoch": 0.07,
"learning_rate": 1.9731110040717384e-05,
"loss": 1.1995,
"step": 60
},
{
"epoch": 0.08,
"learning_rate": 1.9684674536758894e-05,
"loss": 1.1917,
"step": 65
},
{
"epoch": 0.09,
"learning_rate": 1.9634605998492386e-05,
"loss": 1.1744,
"step": 70
},
{
"epoch": 0.09,
"learning_rate": 1.958092320824417e-05,
"loss": 1.193,
"step": 75
},
{
"epoch": 0.1,
"learning_rate": 1.952364630416322e-05,
"loss": 1.1813,
"step": 80
},
{
"epoch": 0.1,
"learning_rate": 1.9462796772666746e-05,
"loss": 1.198,
"step": 85
},
{
"epoch": 0.11,
"learning_rate": 1.9398397440379923e-05,
"loss": 1.169,
"step": 90
},
{
"epoch": 0.12,
"learning_rate": 1.9330472465572883e-05,
"loss": 1.2003,
"step": 95
},
{
"epoch": 0.12,
"learning_rate": 1.9259047329098173e-05,
"loss": 1.1858,
"step": 100
},
{
"epoch": 0.13,
"learning_rate": 1.918414882483206e-05,
"loss": 1.1782,
"step": 105
},
{
"epoch": 0.14,
"learning_rate": 1.91058050496233e-05,
"loss": 1.1812,
"step": 110
},
{
"epoch": 0.14,
"learning_rate": 1.902404539275307e-05,
"loss": 1.1631,
"step": 115
},
{
"epoch": 0.15,
"learning_rate": 1.8938900524910128e-05,
"loss": 1.1792,
"step": 120
},
{
"epoch": 0.15,
"learning_rate": 1.8850402386685235e-05,
"loss": 1.1647,
"step": 125
},
{
"epoch": 0.16,
"learning_rate": 1.875858417658921e-05,
"loss": 1.1736,
"step": 130
},
{
"epoch": 0.17,
"learning_rate": 1.866348033859909e-05,
"loss": 1.1814,
"step": 135
},
{
"epoch": 0.17,
"learning_rate": 1.8565126549237092e-05,
"loss": 1.1892,
"step": 140
},
{
"epoch": 0.18,
"learning_rate": 1.846355970418718e-05,
"loss": 1.1637,
"step": 145
},
{
"epoch": 0.18,
"learning_rate": 1.8358817904454328e-05,
"loss": 1.183,
"step": 150
},
{
"epoch": 0.19,
"learning_rate": 1.825094044207158e-05,
"loss": 1.1715,
"step": 155
},
{
"epoch": 0.2,
"learning_rate": 1.8139967785360338e-05,
"loss": 1.1705,
"step": 160
},
{
"epoch": 0.2,
"learning_rate": 1.8025941563749407e-05,
"loss": 1.1707,
"step": 165
},
{
"epoch": 0.21,
"learning_rate": 1.7908904552158435e-05,
"loss": 1.1608,
"step": 170
},
{
"epoch": 0.22,
"learning_rate": 1.7788900654951664e-05,
"loss": 1.1802,
"step": 175
},
{
"epoch": 0.22,
"learning_rate": 1.766597488946798e-05,
"loss": 1.1874,
"step": 180
},
{
"epoch": 0.23,
"learning_rate": 1.7540173369133463e-05,
"loss": 1.1827,
"step": 185
},
{
"epoch": 0.23,
"learning_rate": 1.741154328616274e-05,
"loss": 1.1845,
"step": 190
},
{
"epoch": 0.24,
"learning_rate": 1.728013289385568e-05,
"loss": 1.1708,
"step": 195
},
{
"epoch": 0.25,
"learning_rate": 1.7145991488495997e-05,
"loss": 1.1571,
"step": 200
},
{
"epoch": 0.25,
"learning_rate": 1.7009169390858635e-05,
"loss": 1.1865,
"step": 205
},
{
"epoch": 0.26,
"learning_rate": 1.6869717927332825e-05,
"loss": 1.164,
"step": 210
},
{
"epoch": 0.26,
"learning_rate": 1.6727689410667887e-05,
"loss": 1.1662,
"step": 215
},
{
"epoch": 0.27,
"learning_rate": 1.6583137120349028e-05,
"loss": 1.1772,
"step": 220
},
{
"epoch": 0.28,
"learning_rate": 1.64361152826105e-05,
"loss": 1.1799,
"step": 225
},
{
"epoch": 0.28,
"learning_rate": 1.6286679050093573e-05,
"loss": 1.1738,
"step": 230
},
{
"epoch": 0.29,
"learning_rate": 1.6134884481156994e-05,
"loss": 1.1841,
"step": 235
},
{
"epoch": 0.3,
"learning_rate": 1.5980788518847706e-05,
"loss": 1.1595,
"step": 240
},
{
"epoch": 0.3,
"learning_rate": 1.5824448969539653e-05,
"loss": 1.1644,
"step": 245
},
{
"epoch": 0.31,
"learning_rate": 1.566592448124874e-05,
"loss": 1.1873,
"step": 250
},
{
"epoch": 0.31,
"learning_rate": 1.550527452163209e-05,
"loss": 1.1694,
"step": 255
},
{
"epoch": 0.32,
"learning_rate": 1.5342559355679776e-05,
"loss": 1.1643,
"step": 260
},
{
"epoch": 0.33,
"learning_rate": 1.5177840023107498e-05,
"loss": 1.1579,
"step": 265
},
{
"epoch": 0.33,
"learning_rate": 1.5011178315458601e-05,
"loss": 1.1719,
"step": 270
},
{
"epoch": 0.34,
"learning_rate": 1.4842636752924073e-05,
"loss": 1.1536,
"step": 275
},
{
"epoch": 0.34,
"learning_rate": 1.467227856088921e-05,
"loss": 1.1513,
"step": 280
},
{
"epoch": 0.35,
"learning_rate": 1.4500167646215722e-05,
"loss": 1.1759,
"step": 285
},
{
"epoch": 0.36,
"learning_rate": 1.4326368573268199e-05,
"loss": 1.1697,
"step": 290
},
{
"epoch": 0.36,
"learning_rate": 1.415094653969395e-05,
"loss": 1.1654,
"step": 295
},
{
"epoch": 0.37,
"learning_rate": 1.397396735196525e-05,
"loss": 1.169,
"step": 300
},
{
"epoch": 0.38,
"learning_rate": 1.3795497400693198e-05,
"loss": 1.1552,
"step": 305
},
{
"epoch": 0.38,
"learning_rate": 1.3615603635722463e-05,
"loss": 1.1628,
"step": 310
},
{
"epoch": 0.39,
"learning_rate": 1.3434353541016238e-05,
"loss": 1.1652,
"step": 315
},
{
"epoch": 0.39,
"learning_rate": 1.3251815109340813e-05,
"loss": 1.1706,
"step": 320
},
{
"epoch": 0.4,
"learning_rate": 1.3068056816759303e-05,
"loss": 1.1815,
"step": 325
},
{
"epoch": 0.41,
"learning_rate": 1.2883147596944054e-05,
"loss": 1.1647,
"step": 330
},
{
"epoch": 0.41,
"learning_rate": 1.2697156815317409e-05,
"loss": 1.1758,
"step": 335
},
{
"epoch": 0.42,
"learning_rate": 1.2510154243030482e-05,
"loss": 1.1748,
"step": 340
},
{
"epoch": 0.42,
"learning_rate": 1.2322210030789759e-05,
"loss": 1.1577,
"step": 345
},
{
"epoch": 0.43,
"learning_rate": 1.2133394682541287e-05,
"loss": 1.1678,
"step": 350
},
{
"epoch": 0.44,
"learning_rate": 1.194377902902238e-05,
"loss": 1.1527,
"step": 355
},
{
"epoch": 0.44,
"learning_rate": 1.1753434201190716e-05,
"loss": 1.1674,
"step": 360
},
{
"epoch": 0.45,
"learning_rate": 1.1562431603540807e-05,
"loss": 1.1597,
"step": 365
},
{
"epoch": 0.46,
"learning_rate": 1.1370842887317888e-05,
"loss": 1.1685,
"step": 370
},
{
"epoch": 0.46,
"learning_rate": 1.117873992363919e-05,
"loss": 1.1846,
"step": 375
},
{
"epoch": 0.47,
"learning_rate": 1.09861947765328e-05,
"loss": 1.1799,
"step": 380
},
{
"epoch": 0.47,
"learning_rate": 1.0793279675904072e-05,
"loss": 1.1519,
"step": 385
},
{
"epoch": 0.48,
"learning_rate": 1.0600066990439895e-05,
"loss": 1.1687,
"step": 390
},
{
"epoch": 0.49,
"learning_rate": 1.0406629200460836e-05,
"loss": 1.1622,
"step": 395
},
{
"epoch": 0.49,
"learning_rate": 1.0213038870731443e-05,
"loss": 1.1644,
"step": 400
},
{
"epoch": 0.5,
"learning_rate": 1.0019368623238845e-05,
"loss": 1.1655,
"step": 405
},
{
"epoch": 0.5,
"learning_rate": 9.82569110994992e-06,
"loss": 1.1667,
"step": 410
},
{
"epoch": 0.51,
"learning_rate": 9.632078985557163e-06,
"loss": 1.1582,
"step": 415
},
{
"epoch": 0.52,
"learning_rate": 9.438604880223595e-06,
"loss": 1.1782,
"step": 420
},
{
"epoch": 0.52,
"learning_rate": 9.245341372336829e-06,
"loss": 1.1699,
"step": 425
},
{
"epoch": 0.53,
"learning_rate": 9.052360961282558e-06,
"loss": 1.1807,
"step": 430
},
{
"epoch": 0.54,
"learning_rate": 8.859736040247719e-06,
"loss": 1.1545,
"step": 435
},
{
"epoch": 0.54,
"learning_rate": 8.667538869063456e-06,
"loss": 1.1597,
"step": 440
},
{
"epoch": 0.55,
"learning_rate": 8.475841547098128e-06,
"loss": 1.1577,
"step": 445
},
{
"epoch": 0.55,
"learning_rate": 8.28471598621049e-06,
"loss": 1.1529,
"step": 450
},
{
"epoch": 0.56,
"learning_rate": 8.094233883773255e-06,
"loss": 1.1742,
"step": 455
},
{
"epoch": 0.57,
"learning_rate": 7.904466695777082e-06,
"loss": 1.1447,
"step": 460
},
{
"epoch": 0.57,
"learning_rate": 7.715485610025124e-06,
"loss": 1.1805,
"step": 465
},
{
"epoch": 0.58,
"learning_rate": 7.5273615194281845e-06,
"loss": 1.152,
"step": 470
},
{
"epoch": 0.58,
"learning_rate": 7.340164995410497e-06,
"loss": 1.1633,
"step": 475
},
{
"epoch": 0.59,
"eval_loss": 1.1676266193389893,
"eval_runtime": 311.0032,
"eval_samples_per_second": 74.308,
"eval_steps_per_second": 18.579,
"step": 478
},
{
"epoch": 0.59,
"step": 478,
"total_flos": 4.175515781527765e+18,
"train_loss": 1.1792220218909835,
"train_runtime": 42494.0036,
"train_samples_per_second": 4.892,
"train_steps_per_second": 0.019
}
],
"logging_steps": 5,
"max_steps": 811,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"total_flos": 4.175515781527765e+18,
"train_batch_size": null,
"trial_name": null,
"trial_params": null
}