bert-base-arabertv2_1 / trainer_state.json
{
"best_metric": 0.894333004951477,
"best_model_checkpoint": "bert-base-arabertv2_1/checkpoint-1000",
"epoch": 22.831050228310502,
"eval_steps": 1000,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11,
"learning_rate": 9.200000000000001e-07,
"loss": 1.0908,
"step": 25
},
{
"epoch": 0.23,
"learning_rate": 1.8400000000000002e-06,
"loss": 1.0474,
"step": 50
},
{
"epoch": 0.34,
"learning_rate": 2.7600000000000003e-06,
"loss": 0.9963,
"step": 75
},
{
"epoch": 0.46,
"learning_rate": 3.7600000000000004e-06,
"loss": 1.0041,
"step": 100
},
{
"epoch": 0.57,
"learning_rate": 4.76e-06,
"loss": 1.0071,
"step": 125
},
{
"epoch": 0.68,
"learning_rate": 5.76e-06,
"loss": 0.9659,
"step": 150
},
{
"epoch": 0.8,
"learning_rate": 6.760000000000001e-06,
"loss": 0.9522,
"step": 175
},
{
"epoch": 0.91,
"learning_rate": 7.76e-06,
"loss": 0.9335,
"step": 200
},
{
"epoch": 1.03,
"learning_rate": 8.76e-06,
"loss": 0.9048,
"step": 225
},
{
"epoch": 1.14,
"learning_rate": 9.760000000000001e-06,
"loss": 0.8749,
"step": 250
},
{
"epoch": 1.26,
"learning_rate": 1.0760000000000002e-05,
"loss": 0.9108,
"step": 275
},
{
"epoch": 1.37,
"learning_rate": 1.1760000000000001e-05,
"loss": 0.8703,
"step": 300
},
{
"epoch": 1.48,
"learning_rate": 1.2760000000000001e-05,
"loss": 0.8957,
"step": 325
},
{
"epoch": 1.6,
"learning_rate": 1.376e-05,
"loss": 0.8933,
"step": 350
},
{
"epoch": 1.71,
"learning_rate": 1.4760000000000001e-05,
"loss": 0.8811,
"step": 375
},
{
"epoch": 1.83,
"learning_rate": 1.576e-05,
"loss": 0.8982,
"step": 400
},
{
"epoch": 1.94,
"learning_rate": 1.6760000000000002e-05,
"loss": 0.857,
"step": 425
},
{
"epoch": 2.05,
"learning_rate": 1.7760000000000003e-05,
"loss": 0.8538,
"step": 450
},
{
"epoch": 2.17,
"learning_rate": 1.876e-05,
"loss": 0.8573,
"step": 475
},
{
"epoch": 2.28,
"learning_rate": 1.976e-05,
"loss": 0.766,
"step": 500
},
{
"epoch": 2.4,
"learning_rate": 1.9915555555555557e-05,
"loss": 0.7945,
"step": 525
},
{
"epoch": 2.51,
"learning_rate": 1.9804444444444445e-05,
"loss": 0.8713,
"step": 550
},
{
"epoch": 2.63,
"learning_rate": 1.9693333333333337e-05,
"loss": 0.8652,
"step": 575
},
{
"epoch": 2.74,
"learning_rate": 1.9582222222222225e-05,
"loss": 0.8292,
"step": 600
},
{
"epoch": 2.85,
"learning_rate": 1.9471111111111113e-05,
"loss": 0.7645,
"step": 625
},
{
"epoch": 2.97,
"learning_rate": 1.936e-05,
"loss": 0.7594,
"step": 650
},
{
"epoch": 3.08,
"learning_rate": 1.924888888888889e-05,
"loss": 0.7144,
"step": 675
},
{
"epoch": 3.2,
"learning_rate": 1.913777777777778e-05,
"loss": 0.6814,
"step": 700
},
{
"epoch": 3.31,
"learning_rate": 1.902666666666667e-05,
"loss": 0.7054,
"step": 725
},
{
"epoch": 3.42,
"learning_rate": 1.8915555555555557e-05,
"loss": 0.7027,
"step": 750
},
{
"epoch": 3.54,
"learning_rate": 1.8804444444444445e-05,
"loss": 0.678,
"step": 775
},
{
"epoch": 3.65,
"learning_rate": 1.8693333333333333e-05,
"loss": 0.7418,
"step": 800
},
{
"epoch": 3.77,
"learning_rate": 1.8582222222222224e-05,
"loss": 0.7237,
"step": 825
},
{
"epoch": 3.88,
"learning_rate": 1.8471111111111112e-05,
"loss": 0.688,
"step": 850
},
{
"epoch": 4.0,
"learning_rate": 1.8360000000000004e-05,
"loss": 0.7115,
"step": 875
},
{
"epoch": 4.11,
"learning_rate": 1.824888888888889e-05,
"loss": 0.6277,
"step": 900
},
{
"epoch": 4.22,
"learning_rate": 1.813777777777778e-05,
"loss": 0.6007,
"step": 925
},
{
"epoch": 4.34,
"learning_rate": 1.8026666666666668e-05,
"loss": 0.5753,
"step": 950
},
{
"epoch": 4.45,
"learning_rate": 1.7915555555555556e-05,
"loss": 0.5818,
"step": 975
},
{
"epoch": 4.57,
"learning_rate": 1.7804444444444448e-05,
"loss": 0.6125,
"step": 1000
},
{
"epoch": 4.57,
"eval_accuracy": 0.65,
"eval_loss": 0.894333004951477,
"eval_runtime": 2.6838,
"eval_samples_per_second": 372.609,
"eval_steps_per_second": 46.576,
"step": 1000
},
{
"epoch": 4.68,
"learning_rate": 1.7693333333333336e-05,
"loss": 0.5496,
"step": 1025
},
{
"epoch": 4.79,
"learning_rate": 1.7582222222222224e-05,
"loss": 0.5603,
"step": 1050
},
{
"epoch": 4.91,
"learning_rate": 1.7471111111111112e-05,
"loss": 0.5716,
"step": 1075
},
{
"epoch": 5.02,
"learning_rate": 1.736e-05,
"loss": 0.5522,
"step": 1100
},
{
"epoch": 5.14,
"learning_rate": 1.724888888888889e-05,
"loss": 0.4755,
"step": 1125
},
{
"epoch": 5.25,
"learning_rate": 1.713777777777778e-05,
"loss": 0.4464,
"step": 1150
},
{
"epoch": 5.37,
"learning_rate": 1.702666666666667e-05,
"loss": 0.4405,
"step": 1175
},
{
"epoch": 5.48,
"learning_rate": 1.6915555555555555e-05,
"loss": 0.4359,
"step": 1200
},
{
"epoch": 5.59,
"learning_rate": 1.6804444444444447e-05,
"loss": 0.463,
"step": 1225
},
{
"epoch": 5.71,
"learning_rate": 1.6693333333333335e-05,
"loss": 0.4788,
"step": 1250
},
{
"epoch": 5.82,
"learning_rate": 1.6582222222222223e-05,
"loss": 0.4673,
"step": 1275
},
{
"epoch": 5.94,
"learning_rate": 1.6471111111111115e-05,
"loss": 0.4315,
"step": 1300
},
{
"epoch": 6.05,
"learning_rate": 1.636e-05,
"loss": 0.4113,
"step": 1325
},
{
"epoch": 6.16,
"learning_rate": 1.624888888888889e-05,
"loss": 0.3585,
"step": 1350
},
{
"epoch": 6.28,
"learning_rate": 1.613777777777778e-05,
"loss": 0.3595,
"step": 1375
},
{
"epoch": 6.39,
"learning_rate": 1.6026666666666667e-05,
"loss": 0.3161,
"step": 1400
},
{
"epoch": 6.51,
"learning_rate": 1.5915555555555558e-05,
"loss": 0.3476,
"step": 1425
},
{
"epoch": 6.62,
"learning_rate": 1.5804444444444446e-05,
"loss": 0.3784,
"step": 1450
},
{
"epoch": 6.74,
"learning_rate": 1.5693333333333334e-05,
"loss": 0.3336,
"step": 1475
},
{
"epoch": 6.85,
"learning_rate": 1.5582222222222222e-05,
"loss": 0.3314,
"step": 1500
},
{
"epoch": 6.96,
"learning_rate": 1.5471111111111114e-05,
"loss": 0.3501,
"step": 1525
},
{
"epoch": 7.08,
"learning_rate": 1.5360000000000002e-05,
"loss": 0.274,
"step": 1550
},
{
"epoch": 7.19,
"learning_rate": 1.524888888888889e-05,
"loss": 0.2473,
"step": 1575
},
{
"epoch": 7.31,
"learning_rate": 1.513777777777778e-05,
"loss": 0.237,
"step": 1600
},
{
"epoch": 7.42,
"learning_rate": 1.5026666666666668e-05,
"loss": 0.2742,
"step": 1625
},
{
"epoch": 7.53,
"learning_rate": 1.4915555555555556e-05,
"loss": 0.2906,
"step": 1650
},
{
"epoch": 7.65,
"learning_rate": 1.4804444444444446e-05,
"loss": 0.2499,
"step": 1675
},
{
"epoch": 7.76,
"learning_rate": 1.4693333333333336e-05,
"loss": 0.2694,
"step": 1700
},
{
"epoch": 7.88,
"learning_rate": 1.4582222222222224e-05,
"loss": 0.2465,
"step": 1725
},
{
"epoch": 7.99,
"learning_rate": 1.4471111111111112e-05,
"loss": 0.2739,
"step": 1750
},
{
"epoch": 8.11,
"learning_rate": 1.4360000000000001e-05,
"loss": 0.1963,
"step": 1775
},
{
"epoch": 8.22,
"learning_rate": 1.424888888888889e-05,
"loss": 0.1694,
"step": 1800
},
{
"epoch": 8.33,
"learning_rate": 1.413777777777778e-05,
"loss": 0.1532,
"step": 1825
},
{
"epoch": 8.45,
"learning_rate": 1.4026666666666669e-05,
"loss": 0.1692,
"step": 1850
},
{
"epoch": 8.56,
"learning_rate": 1.3915555555555557e-05,
"loss": 0.1687,
"step": 1875
},
{
"epoch": 8.68,
"learning_rate": 1.3804444444444445e-05,
"loss": 0.2099,
"step": 1900
},
{
"epoch": 8.79,
"learning_rate": 1.3693333333333333e-05,
"loss": 0.1927,
"step": 1925
},
{
"epoch": 8.9,
"learning_rate": 1.3582222222222223e-05,
"loss": 0.2065,
"step": 1950
},
{
"epoch": 9.02,
"learning_rate": 1.3471111111111113e-05,
"loss": 0.2312,
"step": 1975
},
{
"epoch": 9.13,
"learning_rate": 1.3360000000000003e-05,
"loss": 0.1438,
"step": 2000
},
{
"epoch": 9.13,
"eval_accuracy": 0.622,
"eval_loss": 1.4417067766189575,
"eval_runtime": 2.6482,
"eval_samples_per_second": 377.613,
"eval_steps_per_second": 47.202,
"step": 2000
},
{
"epoch": 9.25,
"learning_rate": 1.3248888888888889e-05,
"loss": 0.1875,
"step": 2025
},
{
"epoch": 9.36,
"learning_rate": 1.3137777777777779e-05,
"loss": 0.1359,
"step": 2050
},
{
"epoch": 9.47,
"learning_rate": 1.3026666666666667e-05,
"loss": 0.1177,
"step": 2075
},
{
"epoch": 9.59,
"learning_rate": 1.2915555555555557e-05,
"loss": 0.1494,
"step": 2100
},
{
"epoch": 9.7,
"learning_rate": 1.2804444444444446e-05,
"loss": 0.1313,
"step": 2125
},
{
"epoch": 9.82,
"learning_rate": 1.2693333333333336e-05,
"loss": 0.1571,
"step": 2150
},
{
"epoch": 9.93,
"learning_rate": 1.2582222222222222e-05,
"loss": 0.1671,
"step": 2175
},
{
"epoch": 10.05,
"learning_rate": 1.2471111111111112e-05,
"loss": 0.1264,
"step": 2200
},
{
"epoch": 10.16,
"learning_rate": 1.236e-05,
"loss": 0.0969,
"step": 2225
},
{
"epoch": 10.27,
"learning_rate": 1.224888888888889e-05,
"loss": 0.0971,
"step": 2250
},
{
"epoch": 10.39,
"learning_rate": 1.213777777777778e-05,
"loss": 0.1435,
"step": 2275
},
{
"epoch": 10.5,
"learning_rate": 1.202666666666667e-05,
"loss": 0.0938,
"step": 2300
},
{
"epoch": 10.62,
"learning_rate": 1.1915555555555556e-05,
"loss": 0.0844,
"step": 2325
},
{
"epoch": 10.73,
"learning_rate": 1.1804444444444446e-05,
"loss": 0.109,
"step": 2350
},
{
"epoch": 10.84,
"learning_rate": 1.1693333333333334e-05,
"loss": 0.0935,
"step": 2375
},
{
"epoch": 10.96,
"learning_rate": 1.1582222222222224e-05,
"loss": 0.1274,
"step": 2400
},
{
"epoch": 11.07,
"learning_rate": 1.1471111111111113e-05,
"loss": 0.0904,
"step": 2425
},
{
"epoch": 11.19,
"learning_rate": 1.136e-05,
"loss": 0.0675,
"step": 2450
},
{
"epoch": 11.3,
"learning_rate": 1.124888888888889e-05,
"loss": 0.0875,
"step": 2475
},
{
"epoch": 11.42,
"learning_rate": 1.113777777777778e-05,
"loss": 0.0729,
"step": 2500
},
{
"epoch": 11.53,
"learning_rate": 1.1026666666666667e-05,
"loss": 0.0708,
"step": 2525
},
{
"epoch": 11.64,
"learning_rate": 1.0915555555555557e-05,
"loss": 0.0948,
"step": 2550
},
{
"epoch": 11.76,
"learning_rate": 1.0804444444444447e-05,
"loss": 0.0795,
"step": 2575
},
{
"epoch": 11.87,
"learning_rate": 1.0693333333333333e-05,
"loss": 0.0761,
"step": 2600
},
{
"epoch": 11.99,
"learning_rate": 1.0582222222222223e-05,
"loss": 0.0987,
"step": 2625
},
{
"epoch": 12.1,
"learning_rate": 1.0471111111111113e-05,
"loss": 0.0723,
"step": 2650
},
{
"epoch": 12.21,
"learning_rate": 1.036e-05,
"loss": 0.054,
"step": 2675
},
{
"epoch": 12.33,
"learning_rate": 1.024888888888889e-05,
"loss": 0.0688,
"step": 2700
},
{
"epoch": 12.44,
"learning_rate": 1.013777777777778e-05,
"loss": 0.079,
"step": 2725
},
{
"epoch": 12.56,
"learning_rate": 1.0026666666666667e-05,
"loss": 0.0457,
"step": 2750
},
{
"epoch": 12.67,
"learning_rate": 9.915555555555556e-06,
"loss": 0.0554,
"step": 2775
},
{
"epoch": 12.79,
"learning_rate": 9.804444444444444e-06,
"loss": 0.066,
"step": 2800
},
{
"epoch": 12.9,
"learning_rate": 9.693333333333334e-06,
"loss": 0.0432,
"step": 2825
},
{
"epoch": 13.01,
"learning_rate": 9.582222222222222e-06,
"loss": 0.0783,
"step": 2850
},
{
"epoch": 13.13,
"learning_rate": 9.471111111111112e-06,
"loss": 0.0645,
"step": 2875
},
{
"epoch": 13.24,
"learning_rate": 9.360000000000002e-06,
"loss": 0.0483,
"step": 2900
},
{
"epoch": 13.36,
"learning_rate": 9.24888888888889e-06,
"loss": 0.0524,
"step": 2925
},
{
"epoch": 13.47,
"learning_rate": 9.137777777777778e-06,
"loss": 0.0497,
"step": 2950
},
{
"epoch": 13.58,
"learning_rate": 9.026666666666666e-06,
"loss": 0.0492,
"step": 2975
},
{
"epoch": 13.7,
"learning_rate": 8.915555555555556e-06,
"loss": 0.0537,
"step": 3000
},
{
"epoch": 13.7,
"eval_accuracy": 0.62,
"eval_loss": 2.1143836975097656,
"eval_runtime": 2.8406,
"eval_samples_per_second": 352.038,
"eval_steps_per_second": 44.005,
"step": 3000
},
{
"epoch": 13.81,
"learning_rate": 8.804444444444446e-06,
"loss": 0.0493,
"step": 3025
},
{
"epoch": 13.93,
"learning_rate": 8.693333333333334e-06,
"loss": 0.0601,
"step": 3050
},
{
"epoch": 14.04,
"learning_rate": 8.582222222222223e-06,
"loss": 0.0522,
"step": 3075
},
{
"epoch": 14.16,
"learning_rate": 8.471111111111112e-06,
"loss": 0.024,
"step": 3100
},
{
"epoch": 14.27,
"learning_rate": 8.36e-06,
"loss": 0.0418,
"step": 3125
},
{
"epoch": 14.38,
"learning_rate": 8.24888888888889e-06,
"loss": 0.0578,
"step": 3150
},
{
"epoch": 14.5,
"learning_rate": 8.137777777777779e-06,
"loss": 0.0423,
"step": 3175
},
{
"epoch": 14.61,
"learning_rate": 8.026666666666667e-06,
"loss": 0.0478,
"step": 3200
},
{
"epoch": 14.73,
"learning_rate": 7.915555555555557e-06,
"loss": 0.0447,
"step": 3225
},
{
"epoch": 14.84,
"learning_rate": 7.804444444444445e-06,
"loss": 0.0416,
"step": 3250
},
{
"epoch": 14.95,
"learning_rate": 7.693333333333333e-06,
"loss": 0.0473,
"step": 3275
},
{
"epoch": 15.07,
"learning_rate": 7.582222222222223e-06,
"loss": 0.0459,
"step": 3300
},
{
"epoch": 15.18,
"learning_rate": 7.471111111111111e-06,
"loss": 0.0151,
"step": 3325
},
{
"epoch": 15.3,
"learning_rate": 7.360000000000001e-06,
"loss": 0.0403,
"step": 3350
},
{
"epoch": 15.41,
"learning_rate": 7.24888888888889e-06,
"loss": 0.0601,
"step": 3375
},
{
"epoch": 15.53,
"learning_rate": 7.137777777777778e-06,
"loss": 0.03,
"step": 3400
},
{
"epoch": 15.64,
"learning_rate": 7.0266666666666674e-06,
"loss": 0.0176,
"step": 3425
},
{
"epoch": 15.75,
"learning_rate": 6.915555555555556e-06,
"loss": 0.0169,
"step": 3450
},
{
"epoch": 15.87,
"learning_rate": 6.8044444444444444e-06,
"loss": 0.0351,
"step": 3475
},
{
"epoch": 15.98,
"learning_rate": 6.693333333333334e-06,
"loss": 0.0477,
"step": 3500
},
{
"epoch": 16.1,
"learning_rate": 6.582222222222223e-06,
"loss": 0.0474,
"step": 3525
},
{
"epoch": 16.21,
"learning_rate": 6.471111111111111e-06,
"loss": 0.0317,
"step": 3550
},
{
"epoch": 16.32,
"learning_rate": 6.360000000000001e-06,
"loss": 0.0216,
"step": 3575
},
{
"epoch": 16.44,
"learning_rate": 6.24888888888889e-06,
"loss": 0.0238,
"step": 3600
},
{
"epoch": 16.55,
"learning_rate": 6.137777777777778e-06,
"loss": 0.0458,
"step": 3625
},
{
"epoch": 16.67,
"learning_rate": 6.026666666666668e-06,
"loss": 0.0273,
"step": 3650
},
{
"epoch": 16.78,
"learning_rate": 5.915555555555556e-06,
"loss": 0.0058,
"step": 3675
},
{
"epoch": 16.89,
"learning_rate": 5.804444444444445e-06,
"loss": 0.0289,
"step": 3700
},
{
"epoch": 17.01,
"learning_rate": 5.6933333333333344e-06,
"loss": 0.0227,
"step": 3725
},
{
"epoch": 17.12,
"learning_rate": 5.5822222222222225e-06,
"loss": 0.0245,
"step": 3750
},
{
"epoch": 17.24,
"learning_rate": 5.4711111111111114e-06,
"loss": 0.0073,
"step": 3775
},
{
"epoch": 17.35,
"learning_rate": 5.36e-06,
"loss": 0.024,
"step": 3800
},
{
"epoch": 17.47,
"learning_rate": 5.248888888888889e-06,
"loss": 0.0094,
"step": 3825
},
{
"epoch": 17.58,
"learning_rate": 5.137777777777778e-06,
"loss": 0.0309,
"step": 3850
},
{
"epoch": 17.69,
"learning_rate": 5.026666666666667e-06,
"loss": 0.07,
"step": 3875
},
{
"epoch": 17.81,
"learning_rate": 4.915555555555556e-06,
"loss": 0.027,
"step": 3900
},
{
"epoch": 17.92,
"learning_rate": 4.804444444444445e-06,
"loss": 0.034,
"step": 3925
},
{
"epoch": 18.04,
"learning_rate": 4.693333333333334e-06,
"loss": 0.0337,
"step": 3950
},
{
"epoch": 18.15,
"learning_rate": 4.582222222222223e-06,
"loss": 0.0173,
"step": 3975
},
{
"epoch": 18.26,
"learning_rate": 4.471111111111112e-06,
"loss": 0.0346,
"step": 4000
},
{
"epoch": 18.26,
"eval_accuracy": 0.643,
"eval_loss": 2.5202674865722656,
"eval_runtime": 2.6541,
"eval_samples_per_second": 376.769,
"eval_steps_per_second": 47.096,
"step": 4000
},
{
"epoch": 18.38,
"learning_rate": 4.360000000000001e-06,
"loss": 0.0392,
"step": 4025
},
{
"epoch": 18.49,
"learning_rate": 4.248888888888889e-06,
"loss": 0.0405,
"step": 4050
},
{
"epoch": 18.61,
"learning_rate": 4.1377777777777784e-06,
"loss": 0.018,
"step": 4075
},
{
"epoch": 18.72,
"learning_rate": 4.026666666666667e-06,
"loss": 0.0129,
"step": 4100
},
{
"epoch": 18.84,
"learning_rate": 3.9155555555555554e-06,
"loss": 0.0188,
"step": 4125
},
{
"epoch": 18.95,
"learning_rate": 3.8044444444444443e-06,
"loss": 0.0141,
"step": 4150
},
{
"epoch": 19.06,
"learning_rate": 3.6933333333333337e-06,
"loss": 0.0237,
"step": 4175
},
{
"epoch": 19.18,
"learning_rate": 3.5822222222222226e-06,
"loss": 0.0135,
"step": 4200
},
{
"epoch": 19.29,
"learning_rate": 3.471111111111111e-06,
"loss": 0.0152,
"step": 4225
},
{
"epoch": 19.41,
"learning_rate": 3.3600000000000004e-06,
"loss": 0.0286,
"step": 4250
},
{
"epoch": 19.52,
"learning_rate": 3.2488888888888894e-06,
"loss": 0.0106,
"step": 4275
},
{
"epoch": 19.63,
"learning_rate": 3.137777777777778e-06,
"loss": 0.0184,
"step": 4300
},
{
"epoch": 19.75,
"learning_rate": 3.0266666666666668e-06,
"loss": 0.019,
"step": 4325
},
{
"epoch": 19.86,
"learning_rate": 2.915555555555556e-06,
"loss": 0.0084,
"step": 4350
},
{
"epoch": 19.98,
"learning_rate": 2.8044444444444446e-06,
"loss": 0.0092,
"step": 4375
},
{
"epoch": 20.09,
"learning_rate": 2.6933333333333335e-06,
"loss": 0.0138,
"step": 4400
},
{
"epoch": 20.21,
"learning_rate": 2.5822222222222224e-06,
"loss": 0.0052,
"step": 4425
},
{
"epoch": 20.32,
"learning_rate": 2.4711111111111114e-06,
"loss": 0.0093,
"step": 4450
},
{
"epoch": 20.43,
"learning_rate": 2.3600000000000003e-06,
"loss": 0.0114,
"step": 4475
},
{
"epoch": 20.55,
"learning_rate": 2.248888888888889e-06,
"loss": 0.0047,
"step": 4500
},
{
"epoch": 20.66,
"learning_rate": 2.137777777777778e-06,
"loss": 0.0304,
"step": 4525
},
{
"epoch": 20.78,
"learning_rate": 2.0266666666666666e-06,
"loss": 0.0076,
"step": 4550
},
{
"epoch": 20.89,
"learning_rate": 1.915555555555556e-06,
"loss": 0.0075,
"step": 4575
},
{
"epoch": 21.0,
"learning_rate": 1.8044444444444444e-06,
"loss": 0.0178,
"step": 4600
},
{
"epoch": 21.12,
"learning_rate": 1.6933333333333336e-06,
"loss": 0.0104,
"step": 4625
},
{
"epoch": 21.23,
"learning_rate": 1.5822222222222223e-06,
"loss": 0.0152,
"step": 4650
},
{
"epoch": 21.35,
"learning_rate": 1.4711111111111112e-06,
"loss": 0.0134,
"step": 4675
},
{
"epoch": 21.46,
"learning_rate": 1.3600000000000001e-06,
"loss": 0.0252,
"step": 4700
},
{
"epoch": 21.58,
"learning_rate": 1.248888888888889e-06,
"loss": 0.022,
"step": 4725
},
{
"epoch": 21.69,
"learning_rate": 1.137777777777778e-06,
"loss": 0.006,
"step": 4750
},
{
"epoch": 21.8,
"learning_rate": 1.0266666666666669e-06,
"loss": 0.0187,
"step": 4775
},
{
"epoch": 21.92,
"learning_rate": 9.155555555555557e-07,
"loss": 0.0115,
"step": 4800
},
{
"epoch": 22.03,
"learning_rate": 8.044444444444445e-07,
"loss": 0.0129,
"step": 4825
},
{
"epoch": 22.15,
"learning_rate": 6.933333333333334e-07,
"loss": 0.0066,
"step": 4850
},
{
"epoch": 22.26,
"learning_rate": 5.822222222222223e-07,
"loss": 0.0174,
"step": 4875
},
{
"epoch": 22.37,
"learning_rate": 4.7111111111111113e-07,
"loss": 0.0181,
"step": 4900
},
{
"epoch": 22.49,
"learning_rate": 3.6e-07,
"loss": 0.0089,
"step": 4925
},
{
"epoch": 22.6,
"learning_rate": 2.488888888888889e-07,
"loss": 0.0157,
"step": 4950
},
{
"epoch": 22.72,
"learning_rate": 1.3777777777777778e-07,
"loss": 0.0219,
"step": 4975
},
{
"epoch": 22.83,
"learning_rate": 2.6666666666666667e-08,
"loss": 0.0013,
"step": 5000
},
{
"epoch": 22.83,
"eval_accuracy": 0.636,
"eval_loss": 2.6798980236053467,
"eval_runtime": 2.8534,
"eval_samples_per_second": 350.458,
"eval_steps_per_second": 43.807,
"step": 5000
},
{
"epoch": 22.83,
"step": 5000,
"total_flos": 1.0512959718739968e+16,
"train_loss": 0.2579876671291888,
"train_runtime": 1594.4621,
"train_samples_per_second": 100.347,
"train_steps_per_second": 3.136
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 23,
"save_steps": 1000,
"total_flos": 1.0512959718739968e+16,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}
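
The file above is a standard Hugging Face Trainer state log: "log_history" interleaves training-loss records (every 25 steps per "logging_steps") with evaluation records (every 1000 steps per "eval_steps"), and "best_metric" / "best_model_checkpoint" point to the checkpoint with the lowest eval_loss. A minimal sketch of how one might inspect it, assuming the JSON is saved locally under the hypothetical path "trainer_state.json":

import json

# Load the trainer state (path is an assumption, not part of the original file).
with open("trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

# Training records carry a "loss" key; evaluation records carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print("best eval_loss:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# Print the evaluation trajectory (steps 1000, 2000, ..., 5000 in this log).
for e in eval_logs:
    print(f"step {e['step']:>5}: eval_loss={e['eval_loss']:.4f}, "
          f"eval_accuracy={e.get('eval_accuracy')}")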