alpagasus-13b / trainer_state.json
Alpagasus-13B fine-tuned on Claude-filtered data (commit bd88d7c)
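The JSON below is the Hugging Face Trainer state written at the end of fine-tuning: a log_history list with per-step training loss and learning rate, followed by a final summary record (total_flos, train_loss, runtime). A minimal sketch for loading it and plotting the loss curve, assuming the file is saved locally as trainer_state.json and matplotlib is installed:

```python
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Every entry except the final summary record carries a per-step loss.
logs = [e for e in state["log_history"] if "loss" in e and "learning_rate" in e]
steps = [e["step"] for e in logs]
losses = [e["loss"] for e in logs]

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("Alpagasus-13B fine-tuning loss")
plt.savefig("loss_curve.png")
print(f"loss fell from {losses[0]:.2f} to {losses[-1]:.2f} over {steps[-1]} steps")
```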
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.963855421686747,
"global_step": 123,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 5e-06,
"loss": 12.1442,
"step": 1
},
{
"epoch": 0.05,
"learning_rate": 1e-05,
"loss": 12.2066,
"step": 2
},
{
"epoch": 0.07,
"learning_rate": 1.5000000000000002e-05,
"loss": 11.4198,
"step": 3
},
{
"epoch": 0.1,
"learning_rate": 2e-05,
"loss": 10.553,
"step": 4
},
{
"epoch": 0.12,
"learning_rate": 1.9996515418688493e-05,
"loss": 11.5989,
"step": 5
},
{
"epoch": 0.14,
"learning_rate": 1.998606410321534e-05,
"loss": 10.1464,
"step": 6
},
{
"epoch": 0.17,
"learning_rate": 1.9968653337272262e-05,
"loss": 9.2548,
"step": 7
},
{
"epoch": 0.19,
"learning_rate": 1.9944295254705187e-05,
"loss": 8.5892,
"step": 8
},
{
"epoch": 0.22,
"learning_rate": 1.9913006831057967e-05,
"loss": 7.8936,
"step": 9
},
{
"epoch": 0.24,
"learning_rate": 1.9874809871741877e-05,
"loss": 8.0968,
"step": 10
},
{
"epoch": 0.27,
"learning_rate": 1.982973099683902e-05,
"loss": 7.8131,
"step": 11
},
{
"epoch": 0.29,
"learning_rate": 1.977780162255041e-05,
"loss": 7.504,
"step": 12
},
{
"epoch": 0.31,
"learning_rate": 1.9719057939301477e-05,
"loss": 7.3231,
"step": 13
},
{
"epoch": 0.34,
"learning_rate": 1.9653540886520387e-05,
"loss": 7.0976,
"step": 14
},
{
"epoch": 0.36,
"learning_rate": 1.9581296124106682e-05,
"loss": 6.9036,
"step": 15
},
{
"epoch": 0.39,
"learning_rate": 1.9502374000610152e-05,
"loss": 6.6765,
"step": 16
},
{
"epoch": 0.41,
"learning_rate": 1.941682951814212e-05,
"loss": 6.4822,
"step": 17
},
{
"epoch": 0.43,
"learning_rate": 1.932472229404356e-05,
"loss": 6.4258,
"step": 18
},
{
"epoch": 0.46,
"learning_rate": 1.922611651933683e-05,
"loss": 6.2448,
"step": 19
},
{
"epoch": 0.48,
"learning_rate": 1.912108091398988e-05,
"loss": 6.0179,
"step": 20
},
{
"epoch": 0.51,
"learning_rate": 1.900968867902419e-05,
"loss": 5.8236,
"step": 21
},
{
"epoch": 0.53,
"learning_rate": 1.8892017445499812e-05,
"loss": 5.8436,
"step": 22
},
{
"epoch": 0.55,
"learning_rate": 1.876814922041299e-05,
"loss": 5.7092,
"step": 23
},
{
"epoch": 0.58,
"learning_rate": 1.8638170329544164e-05,
"loss": 5.5598,
"step": 24
},
{
"epoch": 0.6,
"learning_rate": 1.8502171357296144e-05,
"loss": 5.5397,
"step": 25
},
{
"epoch": 0.63,
"learning_rate": 1.8360247083564343e-05,
"loss": 5.3061,
"step": 26
},
{
"epoch": 0.65,
"learning_rate": 1.8212496417683135e-05,
"loss": 5.2484,
"step": 27
},
{
"epoch": 0.67,
"learning_rate": 1.805902232949435e-05,
"loss": 5.4282,
"step": 28
},
{
"epoch": 0.7,
"learning_rate": 1.789993177758588e-05,
"loss": 5.1144,
"step": 29
},
{
"epoch": 0.72,
"learning_rate": 1.773533563475053e-05,
"loss": 4.9468,
"step": 30
},
{
"epoch": 0.75,
"learning_rate": 1.7565348610716963e-05,
"loss": 5.0151,
"step": 31
},
{
"epoch": 0.77,
"learning_rate": 1.7390089172206594e-05,
"loss": 4.7214,
"step": 32
},
{
"epoch": 0.8,
"learning_rate": 1.720967946037225e-05,
"loss": 4.6076,
"step": 33
},
{
"epoch": 0.82,
"learning_rate": 1.7024245205675986e-05,
"loss": 4.7376,
"step": 34
},
{
"epoch": 0.84,
"learning_rate": 1.6833915640265485e-05,
"loss": 4.4775,
"step": 35
},
{
"epoch": 0.87,
"learning_rate": 1.6638823407910085e-05,
"loss": 4.5881,
"step": 36
},
{
"epoch": 0.89,
"learning_rate": 1.6439104471559157e-05,
"loss": 4.3676,
"step": 37
},
{
"epoch": 0.92,
"learning_rate": 1.6234898018587336e-05,
"loss": 4.6396,
"step": 38
},
{
"epoch": 0.94,
"learning_rate": 1.6026346363792565e-05,
"loss": 4.1056,
"step": 39
},
{
"epoch": 0.96,
"learning_rate": 1.58135948502146e-05,
"loss": 4.0982,
"step": 40
},
{
"epoch": 0.99,
"learning_rate": 1.5596791747843083e-05,
"loss": 4.2723,
"step": 41
},
{
"epoch": 1.01,
"learning_rate": 1.5376088150285777e-05,
"loss": 3.942,
"step": 42
},
{
"epoch": 1.04,
"learning_rate": 1.515163786946896e-05,
"loss": 3.6775,
"step": 43
},
{
"epoch": 1.06,
"learning_rate": 1.4923597328443423e-05,
"loss": 3.3249,
"step": 44
},
{
"epoch": 1.08,
"learning_rate": 1.4692125452370664e-05,
"loss": 3.4726,
"step": 45
},
{
"epoch": 1.11,
"learning_rate": 1.4457383557765385e-05,
"loss": 3.2118,
"step": 46
},
{
"epoch": 1.13,
"learning_rate": 1.4219535240071378e-05,
"loss": 3.2224,
"step": 47
},
{
"epoch": 1.16,
"learning_rate": 1.397874625964921e-05,
"loss": 3.1223,
"step": 48
},
{
"epoch": 1.18,
"learning_rate": 1.3735184426255117e-05,
"loss": 3.1626,
"step": 49
},
{
"epoch": 1.2,
"learning_rate": 1.348901948209167e-05,
"loss": 2.9617,
"step": 50
},
{
"epoch": 1.23,
"learning_rate": 1.324042298351166e-05,
"loss": 2.8352,
"step": 51
},
{
"epoch": 1.25,
"learning_rate": 1.2989568181457704e-05,
"loss": 3.3436,
"step": 52
},
{
"epoch": 1.28,
"learning_rate": 1.2736629900720832e-05,
"loss": 3.2803,
"step": 53
},
{
"epoch": 1.3,
"learning_rate": 1.248178441810224e-05,
"loss": 3.129,
"step": 54
},
{
"epoch": 1.33,
"learning_rate": 1.2225209339563144e-05,
"loss": 3.0431,
"step": 55
},
{
"epoch": 1.35,
"learning_rate": 1.1967083476448282e-05,
"loss": 3.0114,
"step": 56
},
{
"epoch": 1.37,
"learning_rate": 1.1707586720869375e-05,
"loss": 3.0263,
"step": 57
},
{
"epoch": 1.4,
"learning_rate": 1.1446899920335407e-05,
"loss": 2.8614,
"step": 58
},
{
"epoch": 1.42,
"learning_rate": 1.118520475171703e-05,
"loss": 2.8583,
"step": 59
},
{
"epoch": 1.45,
"learning_rate": 1.092268359463302e-05,
"loss": 2.8472,
"step": 60
},
{
"epoch": 1.47,
"learning_rate": 1.0659519404346955e-05,
"loss": 2.7311,
"step": 61
},
{
"epoch": 1.49,
"learning_rate": 1.0395895584262696e-05,
"loss": 2.6286,
"step": 62
},
{
"epoch": 1.52,
"learning_rate": 1.013199585810759e-05,
"loss": 2.7301,
"step": 63
},
{
"epoch": 1.54,
"learning_rate": 9.868004141892412e-06,
"loss": 2.7739,
"step": 64
},
{
"epoch": 1.57,
"learning_rate": 9.604104415737309e-06,
"loss": 2.6254,
"step": 65
},
{
"epoch": 1.59,
"learning_rate": 9.340480595653047e-06,
"loss": 2.6452,
"step": 66
},
{
"epoch": 1.61,
"learning_rate": 9.07731640536698e-06,
"loss": 2.7975,
"step": 67
},
{
"epoch": 1.64,
"learning_rate": 8.814795248282974e-06,
"loss": 2.7089,
"step": 68
},
{
"epoch": 1.66,
"learning_rate": 8.553100079664598e-06,
"loss": 2.7918,
"step": 69
},
{
"epoch": 1.69,
"learning_rate": 8.292413279130625e-06,
"loss": 2.5251,
"step": 70
},
{
"epoch": 1.71,
"learning_rate": 8.03291652355172e-06,
"loss": 2.5613,
"step": 71
},
{
"epoch": 1.73,
"learning_rate": 7.774790660436857e-06,
"loss": 2.7616,
"step": 72
},
{
"epoch": 1.76,
"learning_rate": 7.518215581897763e-06,
"loss": 2.6208,
"step": 73
},
{
"epoch": 1.78,
"learning_rate": 7.263370099279173e-06,
"loss": 2.7671,
"step": 74
},
{
"epoch": 1.81,
"learning_rate": 7.010431818542298e-06,
"loss": 2.5627,
"step": 75
},
{
"epoch": 1.83,
"learning_rate": 6.759577016488343e-06,
"loss": 2.5638,
"step": 76
},
{
"epoch": 1.86,
"learning_rate": 6.510980517908334e-06,
"loss": 2.4598,
"step": 77
},
{
"epoch": 1.88,
"learning_rate": 6.264815573744884e-06,
"loss": 2.5585,
"step": 78
},
{
"epoch": 1.9,
"learning_rate": 6.021253740350793e-06,
"loss": 2.7079,
"step": 79
},
{
"epoch": 1.93,
"learning_rate": 5.780464759928623e-06,
"loss": 2.4145,
"step": 80
},
{
"epoch": 1.95,
"learning_rate": 5.542616442234618e-06,
"loss": 2.7065,
"step": 81
},
{
"epoch": 1.98,
"learning_rate": 5.307874547629339e-06,
"loss": 2.588,
"step": 82
},
{
"epoch": 2.0,
"learning_rate": 5.076402671556578e-06,
"loss": 2.2886,
"step": 83
},
{
"epoch": 2.02,
"learning_rate": 4.848362130531039e-06,
"loss": 1.7128,
"step": 84
},
{
"epoch": 2.05,
"learning_rate": 4.623911849714226e-06,
"loss": 1.7341,
"step": 85
},
{
"epoch": 2.07,
"learning_rate": 4.403208252156921e-06,
"loss": 1.9271,
"step": 86
},
{
"epoch": 2.1,
"learning_rate": 4.186405149785403e-06,
"loss": 1.7111,
"step": 87
},
{
"epoch": 2.12,
"learning_rate": 3.973653636207437e-06,
"loss": 1.7875,
"step": 88
},
{
"epoch": 2.14,
"learning_rate": 3.7651019814126656e-06,
"loss": 1.633,
"step": 89
},
{
"epoch": 2.17,
"learning_rate": 3.560895528440844e-06,
"loss": 1.5634,
"step": 90
},
{
"epoch": 2.19,
"learning_rate": 3.361176592089919e-06,
"loss": 1.6087,
"step": 91
},
{
"epoch": 2.22,
"learning_rate": 3.1660843597345137e-06,
"loss": 1.7095,
"step": 92
},
{
"epoch": 2.24,
"learning_rate": 2.975754794324015e-06,
"loss": 1.7235,
"step": 93
},
{
"epoch": 2.27,
"learning_rate": 2.7903205396277546e-06,
"loss": 1.5522,
"step": 94
},
{
"epoch": 2.29,
"learning_rate": 2.6099108277934105e-06,
"loss": 1.594,
"step": 95
},
{
"epoch": 2.31,
"learning_rate": 2.4346513892830427e-06,
"loss": 1.5743,
"step": 96
},
{
"epoch": 2.34,
"learning_rate": 2.2646643652494693e-06,
"loss": 1.8422,
"step": 97
},
{
"epoch": 2.36,
"learning_rate": 2.100068222414121e-06,
"loss": 1.5189,
"step": 98
},
{
"epoch": 2.39,
"learning_rate": 1.9409776705056514e-06,
"loss": 1.633,
"step": 99
},
{
"epoch": 2.41,
"learning_rate": 1.7875035823168641e-06,
"loss": 1.5292,
"step": 100
},
{
"epoch": 2.43,
"learning_rate": 1.6397529164356606e-06,
"loss": 1.5055,
"step": 101
},
{
"epoch": 2.46,
"learning_rate": 1.4978286427038602e-06,
"loss": 1.6929,
"step": 102
},
{
"epoch": 2.48,
"learning_rate": 1.3618296704558364e-06,
"loss": 1.6472,
"step": 103
},
{
"epoch": 2.51,
"learning_rate": 1.2318507795870138e-06,
"loss": 1.5057,
"step": 104
},
{
"epoch": 2.53,
"learning_rate": 1.1079825545001887e-06,
"loss": 1.4482,
"step": 105
},
{
"epoch": 2.55,
"learning_rate": 9.903113209758098e-07,
"loss": 1.7398,
"step": 106
},
{
"epoch": 2.58,
"learning_rate": 8.789190860101226e-07,
"loss": 1.604,
"step": 107
},
{
"epoch": 2.6,
"learning_rate": 7.738834806631712e-07,
"loss": 1.6135,
"step": 108
},
{
"epoch": 2.63,
"learning_rate": 6.752777059564431e-07,
"loss": 1.5833,
"step": 109
},
{
"epoch": 2.65,
"learning_rate": 5.831704818578842e-07,
"loss": 1.8117,
"step": 110
},
{
"epoch": 2.67,
"learning_rate": 4.976259993898503e-07,
"loss": 1.5287,
"step": 111
},
{
"epoch": 2.7,
"learning_rate": 4.187038758933204e-07,
"loss": 1.5151,
"step": 112
},
{
"epoch": 2.72,
"learning_rate": 3.4645911347961357e-07,
"loss": 1.5188,
"step": 113
},
{
"epoch": 2.75,
"learning_rate": 2.809420606985236e-07,
"loss": 1.5203,
"step": 114
},
{
"epoch": 2.77,
"learning_rate": 2.2219837744959284e-07,
"loss": 1.6247,
"step": 115
},
{
"epoch": 2.8,
"learning_rate": 1.7026900316098217e-07,
"loss": 1.4147,
"step": 116
},
{
"epoch": 2.82,
"learning_rate": 1.2519012825812804e-07,
"loss": 1.6317,
"step": 117
},
{
"epoch": 2.84,
"learning_rate": 8.699316894203225e-08,
"loss": 1.6046,
"step": 118
},
{
"epoch": 2.87,
"learning_rate": 5.5704745294815624e-08,
"loss": 1.5425,
"step": 119
},
{
"epoch": 2.89,
"learning_rate": 3.134666272774034e-08,
"loss": 1.622,
"step": 120
},
{
"epoch": 2.92,
"learning_rate": 1.3935896784663671e-08,
"loss": 1.597,
"step": 121
},
{
"epoch": 2.94,
"learning_rate": 3.4845813115114147e-09,
"loss": 1.6068,
"step": 122
},
{
"epoch": 2.96,
"learning_rate": 0.0,
"loss": 1.5153,
"step": 123
},
{
"epoch": 2.96,
"step": 123,
"total_flos": 2.008420388228301e+16,
"train_loss": 3.741002580983852,
"train_runtime": 2257.2991,
"train_samples_per_second": 7.05,
"train_steps_per_second": 0.054
}
],
"max_steps": 123,
"num_train_epochs": 3,
"total_flos": 2.008420388228301e+16,
"trial_name": null,
"trial_params": null
}
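The learning_rate values in log_history rise linearly to 2e-05 over the first 4 steps and then decay along a cosine curve that reaches zero at the final step (123), i.e. the shape of a cosine-with-warmup schedule. A sketch that recomputes that schedule and checks it against the logged values; the peak rate, warmup length, and step count are read off the log itself rather than taken from a training config:

```python
import json
import math

PEAK_LR = 2e-05      # learning rate logged at step 4
WARMUP_STEPS = 4     # steps over which the rate climbs linearly
MAX_STEPS = 123      # step at which the logged rate hits 0.0


def expected_lr(step: int) -> float:
    """Learning rate implied by linear warmup followed by cosine decay."""
    if step <= WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (MAX_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))


with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "learning_rate" not in entry:
        continue  # skip the final train summary record
    logged = entry["learning_rate"]
    recomputed = expected_lr(entry["step"])
    assert math.isclose(recomputed, logged, rel_tol=1e-6, abs_tol=1e-12), entry

print("all logged learning rates match linear warmup + cosine decay")
```

The peak of 2e-05 at step 4 and the zero value at step 123 pin down both schedule parameters directly from the log, so the check needs nothing beyond this file.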