{
"best_metric": 0.6928293704986572,
"best_model_checkpoint": "/home/llmadmin/models/loras-trained/wizardLM-llama-lora-13b/checkpoint-1400",
"epoch": 2.4705882352941178,
"global_step": 1400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 2.6999999999999996e-05,
"loss": 1.2009,
"step": 10
},
{
"epoch": 0.04,
"learning_rate": 5.6999999999999996e-05,
"loss": 1.198,
"step": 20
},
{
"epoch": 0.05,
"learning_rate": 8.699999999999999e-05,
"loss": 1.1318,
"step": 30
},
{
"epoch": 0.07,
"learning_rate": 0.000117,
"loss": 1.0446,
"step": 40
},
{
"epoch": 0.09,
"learning_rate": 0.000147,
"loss": 0.9254,
"step": 50
},
{
"epoch": 0.11,
"learning_rate": 0.00017699999999999997,
"loss": 0.8364,
"step": 60
},
{
"epoch": 0.12,
"learning_rate": 0.00020699999999999996,
"loss": 0.818,
"step": 70
},
{
"epoch": 0.14,
"learning_rate": 0.000237,
"loss": 0.8094,
"step": 80
},
{
"epoch": 0.16,
"learning_rate": 0.000267,
"loss": 0.7891,
"step": 90
},
{
"epoch": 0.18,
"learning_rate": 0.00029699999999999996,
"loss": 0.7915,
"step": 100
},
{
"epoch": 0.19,
"learning_rate": 0.0002983103879849812,
"loss": 0.777,
"step": 110
},
{
"epoch": 0.21,
"learning_rate": 0.000296433041301627,
"loss": 0.7809,
"step": 120
},
{
"epoch": 0.23,
"learning_rate": 0.0002945556946182728,
"loss": 0.7705,
"step": 130
},
{
"epoch": 0.25,
"learning_rate": 0.0002926783479349186,
"loss": 0.7545,
"step": 140
},
{
"epoch": 0.26,
"learning_rate": 0.00029080100125156443,
"loss": 0.7584,
"step": 150
},
{
"epoch": 0.28,
"learning_rate": 0.00028892365456821025,
"loss": 0.751,
"step": 160
},
{
"epoch": 0.3,
"learning_rate": 0.00028704630788485607,
"loss": 0.757,
"step": 170
},
{
"epoch": 0.32,
"learning_rate": 0.00028516896120150183,
"loss": 0.7446,
"step": 180
},
{
"epoch": 0.34,
"learning_rate": 0.00028329161451814765,
"loss": 0.7411,
"step": 190
},
{
"epoch": 0.35,
"learning_rate": 0.0002814142678347935,
"loss": 0.754,
"step": 200
},
{
"epoch": 0.35,
"eval_loss": 0.7339245676994324,
"eval_runtime": 65.2861,
"eval_samples_per_second": 30.634,
"eval_steps_per_second": 0.766,
"step": 200
},
{
"epoch": 0.37,
"learning_rate": 0.00027953692115143924,
"loss": 0.7487,
"step": 210
},
{
"epoch": 0.39,
"learning_rate": 0.00027765957446808506,
"loss": 0.7478,
"step": 220
},
{
"epoch": 0.41,
"learning_rate": 0.0002757822277847309,
"loss": 0.7336,
"step": 230
},
{
"epoch": 0.42,
"learning_rate": 0.0002739048811013767,
"loss": 0.7386,
"step": 240
},
{
"epoch": 0.44,
"learning_rate": 0.0002720275344180225,
"loss": 0.7359,
"step": 250
},
{
"epoch": 0.46,
"learning_rate": 0.0002701501877346683,
"loss": 0.7455,
"step": 260
},
{
"epoch": 0.48,
"learning_rate": 0.0002682728410513141,
"loss": 0.733,
"step": 270
},
{
"epoch": 0.49,
"learning_rate": 0.0002663954943679599,
"loss": 0.7332,
"step": 280
},
{
"epoch": 0.51,
"learning_rate": 0.00026451814768460575,
"loss": 0.7253,
"step": 290
},
{
"epoch": 0.53,
"learning_rate": 0.00026264080100125157,
"loss": 0.732,
"step": 300
},
{
"epoch": 0.55,
"learning_rate": 0.00026076345431789733,
"loss": 0.7271,
"step": 310
},
{
"epoch": 0.56,
"learning_rate": 0.00025888610763454315,
"loss": 0.7365,
"step": 320
},
{
"epoch": 0.58,
"learning_rate": 0.00025700876095118897,
"loss": 0.7266,
"step": 330
},
{
"epoch": 0.6,
"learning_rate": 0.0002551314142678348,
"loss": 0.733,
"step": 340
},
{
"epoch": 0.62,
"learning_rate": 0.00025325406758448056,
"loss": 0.7343,
"step": 350
},
{
"epoch": 0.64,
"learning_rate": 0.0002513767209011264,
"loss": 0.723,
"step": 360
},
{
"epoch": 0.65,
"learning_rate": 0.0002494993742177722,
"loss": 0.7391,
"step": 370
},
{
"epoch": 0.67,
"learning_rate": 0.000247622027534418,
"loss": 0.7265,
"step": 380
},
{
"epoch": 0.69,
"learning_rate": 0.00024574468085106384,
"loss": 0.7283,
"step": 390
},
{
"epoch": 0.71,
"learning_rate": 0.0002438673341677096,
"loss": 0.7306,
"step": 400
},
{
"epoch": 0.71,
"eval_loss": 0.7154878377914429,
"eval_runtime": 65.4382,
"eval_samples_per_second": 30.563,
"eval_steps_per_second": 0.764,
"step": 400
},
{
"epoch": 0.72,
"learning_rate": 0.00024198998748435542,
"loss": 0.725,
"step": 410
},
{
"epoch": 0.74,
"learning_rate": 0.00024011264080100122,
"loss": 0.7275,
"step": 420
},
{
"epoch": 0.76,
"learning_rate": 0.000238235294117647,
"loss": 0.7188,
"step": 430
},
{
"epoch": 0.78,
"learning_rate": 0.00023635794743429286,
"loss": 0.7216,
"step": 440
},
{
"epoch": 0.79,
"learning_rate": 0.00023448060075093865,
"loss": 0.7132,
"step": 450
},
{
"epoch": 0.81,
"learning_rate": 0.00023260325406758447,
"loss": 0.7372,
"step": 460
},
{
"epoch": 0.83,
"learning_rate": 0.00023072590738423026,
"loss": 0.7111,
"step": 470
},
{
"epoch": 0.85,
"learning_rate": 0.00022884856070087608,
"loss": 0.7131,
"step": 480
},
{
"epoch": 0.86,
"learning_rate": 0.00022697121401752188,
"loss": 0.7193,
"step": 490
},
{
"epoch": 0.88,
"learning_rate": 0.00022509386733416767,
"loss": 0.7152,
"step": 500
},
{
"epoch": 0.9,
"learning_rate": 0.00022321652065081352,
"loss": 0.7234,
"step": 510
},
{
"epoch": 0.92,
"learning_rate": 0.0002213391739674593,
"loss": 0.7193,
"step": 520
},
{
"epoch": 0.94,
"learning_rate": 0.00021946182728410513,
"loss": 0.7208,
"step": 530
},
{
"epoch": 0.95,
"learning_rate": 0.00021758448060075092,
"loss": 0.7205,
"step": 540
},
{
"epoch": 0.97,
"learning_rate": 0.00021570713391739672,
"loss": 0.7154,
"step": 550
},
{
"epoch": 0.99,
"learning_rate": 0.00021382978723404254,
"loss": 0.7224,
"step": 560
},
{
"epoch": 1.01,
"learning_rate": 0.00021195244055068833,
"loss": 0.7015,
"step": 570
},
{
"epoch": 1.02,
"learning_rate": 0.00021007509386733418,
"loss": 0.7168,
"step": 580
},
{
"epoch": 1.04,
"learning_rate": 0.00020819774718397997,
"loss": 0.7281,
"step": 590
},
{
"epoch": 1.06,
"learning_rate": 0.00020632040050062576,
"loss": 0.7112,
"step": 600
},
{
"epoch": 1.06,
"eval_loss": 0.7068994045257568,
"eval_runtime": 65.2314,
"eval_samples_per_second": 30.66,
"eval_steps_per_second": 0.767,
"step": 600
},
{
"epoch": 1.08,
"learning_rate": 0.00020444305381727158,
"loss": 0.7133,
"step": 610
},
{
"epoch": 1.09,
"learning_rate": 0.00020256570713391738,
"loss": 0.7047,
"step": 620
},
{
"epoch": 1.11,
"learning_rate": 0.00020068836045056317,
"loss": 0.7163,
"step": 630
},
{
"epoch": 1.13,
"learning_rate": 0.000198811013767209,
"loss": 0.7168,
"step": 640
},
{
"epoch": 1.15,
"learning_rate": 0.00019693366708385478,
"loss": 0.7028,
"step": 650
},
{
"epoch": 1.16,
"learning_rate": 0.00019505632040050063,
"loss": 0.7064,
"step": 660
},
{
"epoch": 1.18,
"learning_rate": 0.00019317897371714642,
"loss": 0.7127,
"step": 670
},
{
"epoch": 1.2,
"learning_rate": 0.00019130162703379222,
"loss": 0.7193,
"step": 680
},
{
"epoch": 1.22,
"learning_rate": 0.00018942428035043804,
"loss": 0.6966,
"step": 690
},
{
"epoch": 1.24,
"learning_rate": 0.00018754693366708383,
"loss": 0.7104,
"step": 700
},
{
"epoch": 1.25,
"learning_rate": 0.00018566958698372962,
"loss": 0.7076,
"step": 710
},
{
"epoch": 1.27,
"learning_rate": 0.00018379224030037544,
"loss": 0.6971,
"step": 720
},
{
"epoch": 1.29,
"learning_rate": 0.00018191489361702126,
"loss": 0.6941,
"step": 730
},
{
"epoch": 1.31,
"learning_rate": 0.00018003754693366708,
"loss": 0.6992,
"step": 740
},
{
"epoch": 1.32,
"learning_rate": 0.00017816020025031287,
"loss": 0.7059,
"step": 750
},
{
"epoch": 1.34,
"learning_rate": 0.0001762828535669587,
"loss": 0.6936,
"step": 760
},
{
"epoch": 1.36,
"learning_rate": 0.0001744055068836045,
"loss": 0.6961,
"step": 770
},
{
"epoch": 1.38,
"learning_rate": 0.00017252816020025028,
"loss": 0.7125,
"step": 780
},
{
"epoch": 1.39,
"learning_rate": 0.0001706508135168961,
"loss": 0.7046,
"step": 790
},
{
"epoch": 1.41,
"learning_rate": 0.00016877346683354192,
"loss": 0.7034,
"step": 800
},
{
"epoch": 1.41,
"eval_loss": 0.7016428709030151,
"eval_runtime": 65.2245,
"eval_samples_per_second": 30.663,
"eval_steps_per_second": 0.767,
"step": 800
},
{
"epoch": 1.43,
"learning_rate": 0.00016689612015018774,
"loss": 0.7011,
"step": 810
},
{
"epoch": 1.45,
"learning_rate": 0.00016501877346683353,
"loss": 0.7049,
"step": 820
},
{
"epoch": 1.46,
"learning_rate": 0.00016314142678347933,
"loss": 0.6965,
"step": 830
},
{
"epoch": 1.48,
"learning_rate": 0.00016126408010012515,
"loss": 0.6965,
"step": 840
},
{
"epoch": 1.5,
"learning_rate": 0.00015938673341677094,
"loss": 0.6975,
"step": 850
},
{
"epoch": 1.52,
"learning_rate": 0.00015750938673341673,
"loss": 0.7094,
"step": 860
},
{
"epoch": 1.54,
"learning_rate": 0.00015563204005006258,
"loss": 0.6918,
"step": 870
},
{
"epoch": 1.55,
"learning_rate": 0.00015375469336670837,
"loss": 0.7031,
"step": 880
},
{
"epoch": 1.57,
"learning_rate": 0.0001518773466833542,
"loss": 0.7027,
"step": 890
},
{
"epoch": 1.59,
"learning_rate": 0.00015,
"loss": 0.6996,
"step": 900
},
{
"epoch": 1.61,
"learning_rate": 0.00014812265331664578,
"loss": 0.7048,
"step": 910
},
{
"epoch": 1.62,
"learning_rate": 0.0001462453066332916,
"loss": 0.6962,
"step": 920
},
{
"epoch": 1.64,
"learning_rate": 0.00014436795994993742,
"loss": 0.6918,
"step": 930
},
{
"epoch": 1.66,
"learning_rate": 0.0001424906132665832,
"loss": 0.6979,
"step": 940
},
{
"epoch": 1.68,
"learning_rate": 0.000140613266583229,
"loss": 0.6958,
"step": 950
},
{
"epoch": 1.69,
"learning_rate": 0.00013873591989987483,
"loss": 0.7027,
"step": 960
},
{
"epoch": 1.71,
"learning_rate": 0.00013685857321652065,
"loss": 0.7092,
"step": 970
},
{
"epoch": 1.73,
"learning_rate": 0.00013498122653316644,
"loss": 0.6917,
"step": 980
},
{
"epoch": 1.75,
"learning_rate": 0.00013310387984981226,
"loss": 0.6996,
"step": 990
},
{
"epoch": 1.76,
"learning_rate": 0.00013122653316645805,
"loss": 0.6889,
"step": 1000
},
{
"epoch": 1.76,
"eval_loss": 0.6975076794624329,
"eval_runtime": 65.2345,
"eval_samples_per_second": 30.659,
"eval_steps_per_second": 0.766,
"step": 1000
},
{
"epoch": 1.78,
"learning_rate": 0.00012934918648310387,
"loss": 0.7083,
"step": 1010
},
{
"epoch": 1.8,
"learning_rate": 0.00012747183979974967,
"loss": 0.6934,
"step": 1020
},
{
"epoch": 1.82,
"learning_rate": 0.00012559449311639549,
"loss": 0.7002,
"step": 1030
},
{
"epoch": 1.84,
"learning_rate": 0.00012371714643304128,
"loss": 0.6899,
"step": 1040
},
{
"epoch": 1.85,
"learning_rate": 0.0001218397997496871,
"loss": 0.6856,
"step": 1050
},
{
"epoch": 1.87,
"learning_rate": 0.00011996245306633289,
"loss": 0.695,
"step": 1060
},
{
"epoch": 1.89,
"learning_rate": 0.00011808510638297871,
"loss": 0.6934,
"step": 1070
},
{
"epoch": 1.91,
"learning_rate": 0.00011620775969962452,
"loss": 0.6933,
"step": 1080
},
{
"epoch": 1.92,
"learning_rate": 0.00011433041301627033,
"loss": 0.6983,
"step": 1090
},
{
"epoch": 1.94,
"learning_rate": 0.00011245306633291615,
"loss": 0.705,
"step": 1100
},
{
"epoch": 1.96,
"learning_rate": 0.00011057571964956194,
"loss": 0.6944,
"step": 1110
},
{
"epoch": 1.98,
"learning_rate": 0.00010869837296620774,
"loss": 0.6977,
"step": 1120
},
{
"epoch": 1.99,
"learning_rate": 0.00010682102628285355,
"loss": 0.6966,
"step": 1130
},
{
"epoch": 2.01,
"learning_rate": 0.00010494367959949937,
"loss": 0.6868,
"step": 1140
},
{
"epoch": 2.03,
"learning_rate": 0.00010306633291614518,
"loss": 0.6802,
"step": 1150
},
{
"epoch": 2.05,
"learning_rate": 0.00010118898623279097,
"loss": 0.6916,
"step": 1160
},
{
"epoch": 2.06,
"learning_rate": 9.931163954943679e-05,
"loss": 0.7018,
"step": 1170
},
{
"epoch": 2.08,
"learning_rate": 9.74342928660826e-05,
"loss": 0.6951,
"step": 1180
},
{
"epoch": 2.1,
"learning_rate": 9.55569461827284e-05,
"loss": 0.6906,
"step": 1190
},
{
"epoch": 2.12,
"learning_rate": 9.36795994993742e-05,
"loss": 0.6965,
"step": 1200
},
{
"epoch": 2.12,
"eval_loss": 0.6952534317970276,
"eval_runtime": 65.233,
"eval_samples_per_second": 30.659,
"eval_steps_per_second": 0.766,
"step": 1200
},
{
"epoch": 2.14,
"learning_rate": 9.180225281602002e-05,
"loss": 0.6969,
"step": 1210
},
{
"epoch": 2.15,
"learning_rate": 8.992490613266582e-05,
"loss": 0.6924,
"step": 1220
},
{
"epoch": 2.17,
"learning_rate": 8.804755944931163e-05,
"loss": 0.6956,
"step": 1230
},
{
"epoch": 2.19,
"learning_rate": 8.617021276595745e-05,
"loss": 0.6943,
"step": 1240
},
{
"epoch": 2.21,
"learning_rate": 8.429286608260324e-05,
"loss": 0.695,
"step": 1250
},
{
"epoch": 2.22,
"learning_rate": 8.241551939924905e-05,
"loss": 0.6897,
"step": 1260
},
{
"epoch": 2.24,
"learning_rate": 8.053817271589486e-05,
"loss": 0.6922,
"step": 1270
},
{
"epoch": 2.26,
"learning_rate": 7.866082603254068e-05,
"loss": 0.6912,
"step": 1280
},
{
"epoch": 2.28,
"learning_rate": 7.678347934918648e-05,
"loss": 0.6837,
"step": 1290
},
{
"epoch": 2.29,
"learning_rate": 7.490613266583228e-05,
"loss": 0.69,
"step": 1300
},
{
"epoch": 2.31,
"learning_rate": 7.30287859824781e-05,
"loss": 0.6968,
"step": 1310
},
{
"epoch": 2.33,
"learning_rate": 7.11514392991239e-05,
"loss": 0.6898,
"step": 1320
},
{
"epoch": 2.35,
"learning_rate": 6.927409261576971e-05,
"loss": 0.6906,
"step": 1330
},
{
"epoch": 2.36,
"learning_rate": 6.739674593241552e-05,
"loss": 0.6951,
"step": 1340
},
{
"epoch": 2.38,
"learning_rate": 6.551939924906132e-05,
"loss": 0.692,
"step": 1350
},
{
"epoch": 2.4,
"learning_rate": 6.364205256570713e-05,
"loss": 0.6868,
"step": 1360
},
{
"epoch": 2.42,
"learning_rate": 6.176470588235294e-05,
"loss": 0.6895,
"step": 1370
},
{
"epoch": 2.44,
"learning_rate": 5.988735919899874e-05,
"loss": 0.6937,
"step": 1380
},
{
"epoch": 2.45,
"learning_rate": 5.801001251564455e-05,
"loss": 0.6825,
"step": 1390
},
{
"epoch": 2.47,
"learning_rate": 5.6132665832290355e-05,
"loss": 0.6753,
"step": 1400
},
{
"epoch": 2.47,
"eval_loss": 0.6928293704986572,
"eval_runtime": 65.2965,
"eval_samples_per_second": 30.63,
"eval_steps_per_second": 0.766,
"step": 1400
}
],
"max_steps": 1698,
"num_train_epochs": 3,
"total_flos": 6.472761202517737e+18,
"trial_name": null,
"trial_params": null
}