{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.769124264451367,
"global_step": 6000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 3.8461538461538456e-05,
"loss": 3.0784,
"step": 25
},
{
"epoch": 0.02,
"learning_rate": 7.692307692307691e-05,
"loss": 2.3977,
"step": 50
},
{
"epoch": 0.03,
"learning_rate": 0.00011538461538461538,
"loss": 1.443,
"step": 75
},
{
"epoch": 0.05,
"learning_rate": 0.00015384615384615382,
"loss": 0.8912,
"step": 100
},
{
"epoch": 0.06,
"learning_rate": 0.0001923076923076923,
"loss": 1.1574,
"step": 125
},
{
"epoch": 0.07,
"learning_rate": 0.00023076923076923076,
"loss": 0.8311,
"step": 150
},
{
"epoch": 0.08,
"learning_rate": 0.0002692307692307692,
"loss": 1.1308,
"step": 175
},
{
"epoch": 0.09,
"learning_rate": 0.0002999995341928201,
"loss": 0.7901,
"step": 200
},
{
"epoch": 0.1,
"learning_rate": 0.0002999832312452881,
"loss": 1.1048,
"step": 225
},
{
"epoch": 0.12,
"learning_rate": 0.0002999436408316976,
"loss": 0.7632,
"step": 250
},
{
"epoch": 0.13,
"learning_rate": 0.0002998807690991386,
"loss": 1.1239,
"step": 275
},
{
"epoch": 0.14,
"learning_rate": 0.00029979462580952446,
"loss": 0.7622,
"step": 300
},
{
"epoch": 0.15,
"learning_rate": 0.00029968522433807687,
"loss": 1.105,
"step": 325
},
{
"epoch": 0.16,
"learning_rate": 0.00029955258167124835,
"loss": 0.7669,
"step": 350
},
{
"epoch": 0.17,
"learning_rate": 0.0002993967184040853,
"loss": 1.0854,
"step": 375
},
{
"epoch": 0.18,
"learning_rate": 0.00029921765873703004,
"loss": 0.7586,
"step": 400
},
{
"epoch": 0.2,
"learning_rate": 0.0002990154304721634,
"loss": 1.0849,
"step": 425
},
{
"epoch": 0.21,
"learning_rate": 0.000298790065008888,
"loss": 0.7403,
"step": 450
},
{
"epoch": 0.22,
"learning_rate": 0.0002985415973390528,
"loss": 1.0786,
"step": 475
},
{
"epoch": 0.23,
"learning_rate": 0.00029827006604152,
"loss": 0.7676,
"step": 500
},
{
"epoch": 0.24,
"learning_rate": 0.00029797551327617543,
"loss": 1.087,
"step": 525
},
{
"epoch": 0.25,
"learning_rate": 0.0002976579847773818,
"loss": 0.7685,
"step": 550
},
{
"epoch": 0.27,
"learning_rate": 0.00029731752984687835,
"loss": 1.072,
"step": 575
},
{
"epoch": 0.28,
"learning_rate": 0.0002969542013461254,
"loss": 0.7611,
"step": 600
},
{
"epoch": 0.29,
"learning_rate": 0.000296568055688097,
"loss": 1.0691,
"step": 625
},
{
"epoch": 0.3,
"learning_rate": 0.00029615915282852175,
"loss": 0.7512,
"step": 650
},
{
"epoch": 0.31,
"learning_rate": 0.00029572755625657365,
"loss": 1.0789,
"step": 675
},
{
"epoch": 0.32,
"learning_rate": 0.00029527333298501427,
"loss": 0.739,
"step": 700
},
{
"epoch": 0.33,
"learning_rate": 0.0002947965535397879,
"loss": 1.0944,
"step": 725
},
{
"epoch": 0.35,
"learning_rate": 0.0002942972919490714,
"loss": 0.7681,
"step": 750
},
{
"epoch": 0.36,
"learning_rate": 0.0002937756257317797,
"loss": 1.0773,
"step": 775
},
{
"epoch": 0.37,
"learning_rate": 0.00029323163588552975,
"loss": 0.7541,
"step": 800
},
{
"epoch": 0.38,
"learning_rate": 0.0002926654068740645,
"loss": 1.0885,
"step": 825
},
{
"epoch": 0.39,
"learning_rate": 0.0002920770266141382,
"loss": 0.7505,
"step": 850
},
{
"epoch": 0.4,
"learning_rate": 0.000291466586461866,
"loss": 1.0623,
"step": 875
},
{
"epoch": 0.42,
"learning_rate": 0.00029083418119853904,
"loss": 0.7429,
"step": 900
},
{
"epoch": 0.43,
"learning_rate": 0.0002901799090159085,
"loss": 1.0523,
"step": 925
},
{
"epoch": 0.44,
"learning_rate": 0.00028950387150093916,
"loss": 0.7258,
"step": 950
},
{
"epoch": 0.45,
"learning_rate": 0.0002888061736200367,
"loss": 1.0618,
"step": 975
},
{
"epoch": 0.46,
"learning_rate": 0.0002880869237027496,
"loss": 0.739,
"step": 1000
},
{
"epoch": 0.47,
"learning_rate": 0.0002873462334249492,
"loss": 1.063,
"step": 1025
},
{
"epoch": 0.48,
"learning_rate": 0.00028658421779149,
"loss": 0.7298,
"step": 1050
},
{
"epoch": 0.5,
"learning_rate": 0.0002858009951183536,
"loss": 1.0526,
"step": 1075
},
{
"epoch": 0.51,
"learning_rate": 0.0002849966870142775,
"loss": 0.7322,
"step": 1100
},
{
"epoch": 0.52,
"learning_rate": 0.00028417141836187374,
"loss": 1.0509,
"step": 1125
},
{
"epoch": 0.53,
"learning_rate": 0.0002833253172982385,
"loss": 0.7245,
"step": 1150
},
{
"epoch": 0.54,
"learning_rate": 0.0002824585151950568,
"loss": 1.0746,
"step": 1175
},
{
"epoch": 0.55,
"learning_rate": 0.00028157114663820445,
"loss": 0.7327,
"step": 1200
},
{
"epoch": 0.57,
"learning_rate": 0.0002806633494068519,
"loss": 1.0685,
"step": 1225
},
{
"epoch": 0.58,
"learning_rate": 0.00027973526445207084,
"loss": 0.7457,
"step": 1250
},
{
"epoch": 0.59,
"learning_rate": 0.00027878703587495,
"loss": 1.0731,
"step": 1275
},
{
"epoch": 0.6,
"learning_rate": 0.00027781881090422017,
"loss": 0.728,
"step": 1300
},
{
"epoch": 0.61,
"learning_rate": 0.00027683073987339523,
"loss": 1.0638,
"step": 1325
},
{
"epoch": 0.62,
"learning_rate": 0.0002758229761974294,
"loss": 0.734,
"step": 1350
},
{
"epoch": 0.63,
"learning_rate": 0.0002747956763488979,
"loss": 1.0548,
"step": 1375
},
{
"epoch": 0.65,
"learning_rate": 0.0002737489998337011,
"loss": 0.7357,
"step": 1400
},
{
"epoch": 0.66,
"learning_rate": 0.00027268310916629885,
"loss": 1.0528,
"step": 1425
},
{
"epoch": 0.67,
"learning_rate": 0.0002715981698444775,
"loss": 0.7227,
"step": 1450
},
{
"epoch": 0.68,
"learning_rate": 0.0002704943503236531,
"loss": 1.0546,
"step": 1475
},
{
"epoch": 0.69,
"learning_rate": 0.00026937182199071613,
"loss": 0.7228,
"step": 1500
},
{
"epoch": 0.7,
"learning_rate": 0.0002682307591374206,
"loss": 1.052,
"step": 1525
},
{
"epoch": 0.72,
"learning_rate": 0.0002670713389333225,
"loss": 0.729,
"step": 1550
},
{
"epoch": 0.73,
"learning_rate": 0.0002658937413982708,
"loss": 1.0472,
"step": 1575
},
{
"epoch": 0.74,
"learning_rate": 0.00026469814937445655,
"loss": 0.7294,
"step": 1600
},
{
"epoch": 0.75,
"learning_rate": 0.00026348474849802357,
"loss": 1.0383,
"step": 1625
},
{
"epoch": 0.76,
"learning_rate": 0.000262253727170245,
"loss": 0.723,
"step": 1650
},
{
"epoch": 0.77,
"learning_rate": 0.0002610052765282711,
"loss": 1.0467,
"step": 1675
},
{
"epoch": 0.78,
"learning_rate": 0.0002597395904154515,
"loss": 0.7191,
"step": 1700
},
{
"epoch": 0.8,
"learning_rate": 0.0002584568653512381,
"loss": 1.0481,
"step": 1725
},
{
"epoch": 0.81,
"learning_rate": 0.0002571573005006718,
"loss": 0.6965,
"step": 1750
},
{
"epoch": 0.82,
"learning_rate": 0.00025584109764345905,
"loss": 1.0579,
"step": 1775
},
{
"epoch": 0.83,
"learning_rate": 0.0002545084611426415,
"loss": 0.7132,
"step": 1800
},
{
"epoch": 0.84,
"learning_rate": 0.00025315959791286585,
"loss": 1.0537,
"step": 1825
},
{
"epoch": 0.85,
"learning_rate": 0.0002517947173882562,
"loss": 0.7253,
"step": 1850
},
{
"epoch": 0.87,
"learning_rate": 0.0002504140314898962,
"loss": 1.0335,
"step": 1875
},
{
"epoch": 0.88,
"learning_rate": 0.0002490177545929246,
"loss": 0.7112,
"step": 1900
},
{
"epoch": 0.89,
"learning_rate": 0.00024760610349324955,
"loss": 1.07,
"step": 1925
},
{
"epoch": 0.9,
"learning_rate": 0.0002461792973738878,
"loss": 0.7135,
"step": 1950
},
{
"epoch": 0.91,
"learning_rate": 0.00024473755777093244,
"loss": 1.0404,
"step": 1975
},
{
"epoch": 0.92,
"learning_rate": 0.00024328110853915556,
"loss": 0.714,
"step": 2000
},
{
"epoch": 0.93,
"learning_rate": 0.00024181017581725143,
"loss": 1.051,
"step": 2025
},
{
"epoch": 0.95,
"learning_rate": 0.00024032498799272402,
"loss": 0.725,
"step": 2050
},
{
"epoch": 0.96,
"learning_rate": 0.0002388257756664263,
"loss": 1.054,
"step": 2075
},
{
"epoch": 0.97,
"learning_rate": 0.00023731277161675532,
"loss": 0.7111,
"step": 2100
},
{
"epoch": 0.98,
"learning_rate": 0.00023578621076350965,
"loss": 1.0462,
"step": 2125
},
{
"epoch": 0.99,
"learning_rate": 0.00023424633013141363,
"loss": 0.7244,
"step": 2150
},
{
"epoch": 1.0,
"learning_rate": 0.00023269336881331565,
"loss": 0.9686,
"step": 2175
},
{
"epoch": 1.02,
"learning_rate": 0.00023112756793306445,
"loss": 0.8741,
"step": 2200
},
{
"epoch": 1.03,
"learning_rate": 0.000229549170608071,
"loss": 0.7772,
"step": 2225
},
{
"epoch": 1.04,
"learning_rate": 0.00022795842191156,
"loss": 0.8901,
"step": 2250
},
{
"epoch": 1.05,
"learning_rate": 0.00022635556883451806,
"loss": 0.8139,
"step": 2275
},
{
"epoch": 1.06,
"learning_rate": 0.00022474086024734443,
"loss": 0.8782,
"step": 2300
},
{
"epoch": 1.07,
"learning_rate": 0.0002231145468612094,
"loss": 0.8002,
"step": 2325
},
{
"epoch": 1.08,
"learning_rate": 0.00022147688118912695,
"loss": 0.9184,
"step": 2350
},
{
"epoch": 1.1,
"learning_rate": 0.000219828117506748,
"loss": 0.7961,
"step": 2375
},
{
"epoch": 1.11,
"learning_rate": 0.0002181685118128797,
"loss": 0.9099,
"step": 2400
},
{
"epoch": 1.12,
"learning_rate": 0.00021649832178973727,
"loss": 0.8191,
"step": 2425
},
{
"epoch": 1.13,
"learning_rate": 0.00021481780676293428,
"loss": 0.925,
"step": 2450
},
{
"epoch": 1.14,
"learning_rate": 0.00021312722766121808,
"loss": 0.7863,
"step": 2475
},
{
"epoch": 1.15,
"learning_rate": 0.00021142684697595605,
"loss": 0.9065,
"step": 2500
},
{
"epoch": 1.17,
"learning_rate": 0.00020971692872037923,
"loss": 0.7908,
"step": 2525
},
{
"epoch": 1.18,
"learning_rate": 0.00020799773838859004,
"loss": 0.9115,
"step": 2550
},
{
"epoch": 1.19,
"learning_rate": 0.00020626954291433935,
"loss": 0.7844,
"step": 2575
},
{
"epoch": 1.2,
"learning_rate": 0.0002045326106295808,
"loss": 0.9132,
"step": 2600
},
{
"epoch": 1.21,
"learning_rate": 0.0002027872112228075,
"loss": 0.7805,
"step": 2625
},
{
"epoch": 1.22,
"learning_rate": 0.00020103361569717837,
"loss": 0.9106,
"step": 2650
},
{
"epoch": 1.23,
"learning_rate": 0.00019927209632844006,
"loss": 0.791,
"step": 2675
},
{
"epoch": 1.25,
"learning_rate": 0.00019750292662265174,
"loss": 0.8947,
"step": 2700
},
{
"epoch": 1.26,
"learning_rate": 0.00019572638127371837,
"loss": 0.7896,
"step": 2725
},
{
"epoch": 1.27,
"learning_rate": 0.00019394273612073992,
"loss": 0.8987,
"step": 2750
},
{
"epoch": 1.28,
"learning_rate": 0.00019215226810518254,
"loss": 0.8036,
"step": 2775
},
{
"epoch": 1.29,
"learning_rate": 0.0001903552552278787,
"loss": 0.9274,
"step": 2800
},
{
"epoch": 1.3,
"learning_rate": 0.0001885519765058627,
"loss": 0.7937,
"step": 2825
},
{
"epoch": 1.32,
"learning_rate": 0.0001867427119290488,
"loss": 0.8966,
"step": 2850
},
{
"epoch": 1.33,
"learning_rate": 0.00018492774241675742,
"loss": 0.7985,
"step": 2875
},
{
"epoch": 1.34,
"learning_rate": 0.00018310734977409826,
"loss": 0.8964,
"step": 2900
},
{
"epoch": 1.35,
"learning_rate": 0.00018128181664821475,
"loss": 0.7905,
"step": 2925
},
{
"epoch": 1.36,
"learning_rate": 0.00017945142648439843,
"loss": 0.8919,
"step": 2950
},
{
"epoch": 1.37,
"learning_rate": 0.00017761646348207917,
"loss": 0.8017,
"step": 2975
},
{
"epoch": 1.38,
"learning_rate": 0.0001757772125506984,
"loss": 0.8873,
"step": 3000
},
{
"epoch": 1.4,
"learning_rate": 0.00017393395926547202,
"loss": 0.7904,
"step": 3025
},
{
"epoch": 1.41,
"learning_rate": 0.00017208698982304984,
"loss": 0.9017,
"step": 3050
},
{
"epoch": 1.42,
"learning_rate": 0.00017023659099707876,
"loss": 0.7917,
"step": 3075
},
{
"epoch": 1.43,
"learning_rate": 0.00016838305009367628,
"loss": 0.9097,
"step": 3100
},
{
"epoch": 1.44,
"learning_rate": 0.00016652665490682128,
"loss": 0.8175,
"step": 3125
},
{
"epoch": 1.45,
"learning_rate": 0.00016466769367366887,
"loss": 0.8899,
"step": 3150
},
{
"epoch": 1.47,
"learning_rate": 0.00016280645502979695,
"loss": 0.7847,
"step": 3175
},
{
"epoch": 1.48,
"learning_rate": 0.00016094322796439043,
"loss": 0.8796,
"step": 3200
},
{
"epoch": 1.49,
"learning_rate": 0.00015907830177537078,
"loss": 0.7959,
"step": 3225
},
{
"epoch": 1.5,
"learning_rate": 0.00015721196602447735,
"loss": 0.888,
"step": 3250
},
{
"epoch": 1.51,
"learning_rate": 0.0001553445104923083,
"loss": 0.7741,
"step": 3275
},
{
"epoch": 1.52,
"learning_rate": 0.00015347622513332685,
"loss": 0.9118,
"step": 3300
},
{
"epoch": 1.53,
"learning_rate": 0.00015160740003084127,
"loss": 0.7928,
"step": 3325
},
{
"epoch": 1.55,
"learning_rate": 0.00014973832535196402,
"loss": 0.8985,
"step": 3350
},
{
"epoch": 1.56,
"learning_rate": 0.00014786929130255872,
"loss": 0.7885,
"step": 3375
},
{
"epoch": 1.57,
"learning_rate": 0.00014600058808218047,
"loss": 0.8915,
"step": 3400
},
{
"epoch": 1.58,
"learning_rate": 0.0001441325058390176,
"loss": 0.775,
"step": 3425
},
{
"epoch": 1.59,
"learning_rate": 0.00014226533462484097,
"loss": 0.9084,
"step": 3450
},
{
"epoch": 1.6,
"learning_rate": 0.0001403993643499688,
"loss": 0.783,
"step": 3475
},
{
"epoch": 1.62,
"learning_rate": 0.00013853488473825263,
"loss": 0.897,
"step": 3500
},
{
"epoch": 1.63,
"learning_rate": 0.00013667218528209314,
"loss": 0.7799,
"step": 3525
},
{
"epoch": 1.64,
"learning_rate": 0.00013481155519749142,
"loss": 0.9029,
"step": 3550
},
{
"epoch": 1.65,
"learning_rate": 0.0001329532833791431,
"loss": 0.7867,
"step": 3575
},
{
"epoch": 1.66,
"learning_rate": 0.00013109765835558256,
"loss": 0.8932,
"step": 3600
},
{
"epoch": 1.67,
"learning_rate": 0.00012924496824438401,
"loss": 0.7868,
"step": 3625
},
{
"epoch": 1.68,
"learning_rate": 0.00012739550070742616,
"loss": 0.8851,
"step": 3650
},
{
"epoch": 1.7,
"learning_rate": 0.00012554954290622798,
"loss": 0.7783,
"step": 3675
},
{
"epoch": 1.71,
"learning_rate": 0.0001237073814573617,
"loss": 0.8929,
"step": 3700
},
{
"epoch": 1.72,
"learning_rate": 0.00012186930238795088,
"loss": 0.8003,
"step": 3725
},
{
"epoch": 1.73,
"learning_rate": 0.00012003559109125972,
"loss": 0.8833,
"step": 3750
},
{
"epoch": 1.74,
"learning_rate": 0.00011820653228238095,
"loss": 0.7831,
"step": 3775
},
{
"epoch": 1.75,
"learning_rate": 0.00011638240995402873,
"loss": 0.8936,
"step": 3800
},
{
"epoch": 1.77,
"learning_rate": 0.00011456350733244412,
"loss": 0.7678,
"step": 3825
},
{
"epoch": 1.78,
"learning_rate": 0.00011275010683341939,
"loss": 0.9017,
"step": 3850
},
{
"epoch": 1.79,
"learning_rate": 0.00011094249001844783,
"loss": 0.7693,
"step": 3875
},
{
"epoch": 1.8,
"learning_rate": 0.0001091409375510068,
"loss": 0.8821,
"step": 3900
},
{
"epoch": 1.81,
"learning_rate": 0.00010734572915297985,
"loss": 0.7796,
"step": 3925
},
{
"epoch": 1.82,
"learning_rate": 0.000105557143561225,
"loss": 0.8948,
"step": 3950
},
{
"epoch": 1.83,
"learning_rate": 0.00010377545848429614,
"loss": 0.7797,
"step": 3975
},
{
"epoch": 1.85,
"learning_rate": 0.00010200095055932406,
"loss": 0.8789,
"step": 4000
},
{
"epoch": 1.86,
"learning_rate": 0.00010023389530906374,
"loss": 0.791,
"step": 4025
},
{
"epoch": 1.87,
"learning_rate": 9.847456709911478e-05,
"loss": 0.8838,
"step": 4050
},
{
"epoch": 1.88,
"learning_rate": 9.672323909532129e-05,
"loss": 0.7831,
"step": 4075
},
{
"epoch": 1.89,
"learning_rate": 9.498018322135838e-05,
"loss": 0.8763,
"step": 4100
},
{
"epoch": 1.9,
"learning_rate": 9.324567011651136e-05,
"loss": 0.8028,
"step": 4125
},
{
"epoch": 1.92,
"learning_rate": 9.151996909365419e-05,
"loss": 0.9126,
"step": 4150
},
{
"epoch": 1.93,
"learning_rate": 8.980334809743414e-05,
"loss": 0.7953,
"step": 4175
},
{
"epoch": 1.94,
"learning_rate": 8.809607366266878e-05,
"loss": 0.8934,
"step": 4200
},
{
"epoch": 1.95,
"learning_rate": 8.639841087296207e-05,
"loss": 0.7763,
"step": 4225
},
{
"epoch": 1.96,
"learning_rate": 8.471062331954525e-05,
"loss": 0.8913,
"step": 4250
},
{
"epoch": 1.97,
"learning_rate": 8.303297306034988e-05,
"loss": 0.7765,
"step": 4275
},
{
"epoch": 1.98,
"learning_rate": 8.136572057931912e-05,
"loss": 0.8763,
"step": 4300
},
{
"epoch": 2.0,
"learning_rate": 7.970912474596287e-05,
"loss": 0.7483,
"step": 4325
},
{
"epoch": 2.01,
"learning_rate": 7.806344277516381e-05,
"loss": 0.9051,
"step": 4350
},
{
"epoch": 2.02,
"learning_rate": 7.642893018724062e-05,
"loss": 0.703,
"step": 4375
},
{
"epoch": 2.03,
"learning_rate": 7.480584076827394e-05,
"loss": 0.8799,
"step": 4400
},
{
"epoch": 2.04,
"learning_rate": 7.319442653070165e-05,
"loss": 0.7338,
"step": 4425
},
{
"epoch": 2.05,
"learning_rate": 7.159493767418978e-05,
"loss": 0.8673,
"step": 4450
},
{
"epoch": 2.07,
"learning_rate": 7.000762254678456e-05,
"loss": 0.7347,
"step": 4475
},
{
"epoch": 2.08,
"learning_rate": 6.843272760635235e-05,
"loss": 0.8641,
"step": 4500
},
{
"epoch": 2.09,
"learning_rate": 6.687049738231268e-05,
"loss": 0.7346,
"step": 4525
},
{
"epoch": 2.1,
"learning_rate": 6.532117443767072e-05,
"loss": 0.8766,
"step": 4550
},
{
"epoch": 2.11,
"learning_rate": 6.378499933135577e-05,
"loss": 0.7404,
"step": 4575
},
{
"epoch": 2.12,
"learning_rate": 6.226221058086958e-05,
"loss": 0.8855,
"step": 4600
},
{
"epoch": 2.13,
"learning_rate": 6.0753044625253063e-05,
"loss": 0.741,
"step": 4625
},
{
"epoch": 2.15,
"learning_rate": 5.9257735788374854e-05,
"loss": 0.8679,
"step": 4650
},
{
"epoch": 2.16,
"learning_rate": 5.7776516242548395e-05,
"loss": 0.7291,
"step": 4675
},
{
"epoch": 2.17,
"learning_rate": 5.6309615972483455e-05,
"loss": 0.8408,
"step": 4700
},
{
"epoch": 2.18,
"learning_rate": 5.485726273957677e-05,
"loss": 0.7349,
"step": 4725
},
{
"epoch": 2.19,
"learning_rate": 5.341968204654852e-05,
"loss": 0.8473,
"step": 4750
},
{
"epoch": 2.2,
"learning_rate": 5.199709710242896e-05,
"loss": 0.7233,
"step": 4775
},
{
"epoch": 2.22,
"learning_rate": 5.058972878790129e-05,
"loss": 0.8649,
"step": 4800
},
{
"epoch": 2.23,
"learning_rate": 4.919779562100643e-05,
"loss": 0.7435,
"step": 4825
},
{
"epoch": 2.24,
"learning_rate": 4.782151372321425e-05,
"loss": 0.8768,
"step": 4850
},
{
"epoch": 2.25,
"learning_rate": 4.646109678586675e-05,
"loss": 0.7419,
"step": 4875
},
{
"epoch": 2.26,
"learning_rate": 4.511675603699927e-05,
"loss": 0.8745,
"step": 4900
},
{
"epoch": 2.27,
"learning_rate": 4.3788700208543235e-05,
"loss": 0.7294,
"step": 4925
},
{
"epoch": 2.28,
"learning_rate": 4.2477135503917515e-05,
"loss": 0.8768,
"step": 4950
},
{
"epoch": 2.3,
"learning_rate": 4.1182265566011184e-05,
"loss": 0.74,
"step": 4975
},
{
"epoch": 2.31,
"learning_rate": 3.9904291445564937e-05,
"loss": 0.8695,
"step": 5000
},
{
"epoch": 2.32,
"learning_rate": 3.864341156995439e-05,
"loss": 0.7214,
"step": 5025
},
{
"epoch": 2.33,
"learning_rate": 3.739982171238066e-05,
"loss": 0.8579,
"step": 5050
},
{
"epoch": 2.34,
"learning_rate": 3.617371496147347e-05,
"loss": 0.7466,
"step": 5075
},
{
"epoch": 2.35,
"learning_rate": 3.4965281691310755e-05,
"loss": 0.8625,
"step": 5100
},
{
"epoch": 2.37,
"learning_rate": 3.3774709531859636e-05,
"loss": 0.7496,
"step": 5125
},
{
"epoch": 2.38,
"learning_rate": 3.260218333984402e-05,
"loss": 0.8741,
"step": 5150
},
{
"epoch": 2.39,
"learning_rate": 3.144788517004204e-05,
"loss": 0.7245,
"step": 5175
},
{
"epoch": 2.4,
"learning_rate": 3.0311994247019213e-05,
"loss": 0.8712,
"step": 5200
},
{
"epoch": 2.41,
"learning_rate": 2.9194686937300664e-05,
"loss": 0.7163,
"step": 5225
},
{
"epoch": 2.42,
"learning_rate": 2.8096136721987088e-05,
"loss": 0.8638,
"step": 5250
},
{
"epoch": 2.43,
"learning_rate": 2.7016514169819016e-05,
"loss": 0.7206,
"step": 5275
},
{
"epoch": 2.45,
"learning_rate": 2.595598691069304e-05,
"loss": 0.8693,
"step": 5300
},
{
"epoch": 2.46,
"learning_rate": 2.491471960963434e-05,
"loss": 0.7308,
"step": 5325
},
{
"epoch": 2.47,
"learning_rate": 2.3892873941229695e-05,
"loss": 0.8619,
"step": 5350
},
{
"epoch": 2.48,
"learning_rate": 2.2890608564524616e-05,
"loss": 0.7062,
"step": 5375
},
{
"epoch": 2.49,
"learning_rate": 2.190807909838896e-05,
"loss": 0.8553,
"step": 5400
},
{
"epoch": 2.5,
"learning_rate": 2.0945438097354384e-05,
"loss": 0.7415,
"step": 5425
},
{
"epoch": 2.52,
"learning_rate": 2.0002835027927544e-05,
"loss": 0.8702,
"step": 5450
},
{
"epoch": 2.53,
"learning_rate": 1.908041624538304e-05,
"loss": 0.7552,
"step": 5475
},
{
"epoch": 2.54,
"learning_rate": 1.817832497103901e-05,
"loss": 0.8496,
"step": 5500
},
{
"epoch": 2.55,
"learning_rate": 1.729670127001987e-05,
"loss": 0.7167,
"step": 5525
},
{
"epoch": 2.56,
"learning_rate": 1.643568202950854e-05,
"loss": 0.8556,
"step": 5550
},
{
"epoch": 2.57,
"learning_rate": 1.559540093749247e-05,
"loss": 0.7245,
"step": 5575
},
{
"epoch": 2.58,
"learning_rate": 1.4775988462006316e-05,
"loss": 0.8672,
"step": 5600
},
{
"epoch": 2.6,
"learning_rate": 1.3977571830874524e-05,
"loss": 0.7386,
"step": 5625
},
{
"epoch": 2.61,
"learning_rate": 1.3200275011956901e-05,
"loss": 0.862,
"step": 5650
},
{
"epoch": 2.62,
"learning_rate": 1.2444218693900826e-05,
"loss": 0.7431,
"step": 5675
},
{
"epoch": 2.63,
"learning_rate": 1.170952026740178e-05,
"loss": 0.8653,
"step": 5700
},
{
"epoch": 2.64,
"learning_rate": 1.0996293806976907e-05,
"loss": 0.743,
"step": 5725
},
{
"epoch": 2.65,
"learning_rate": 1.0304650053252634e-05,
"loss": 0.8539,
"step": 5750
},
{
"epoch": 2.67,
"learning_rate": 9.63469639577048e-06,
"loss": 0.7485,
"step": 5775
},
{
"epoch": 2.68,
"learning_rate": 8.986536856312987e-06,
"loss": 0.861,
"step": 5800
},
{
"epoch": 2.69,
"learning_rate": 8.360272072752406e-06,
"loss": 0.7405,
"step": 5825
},
{
"epoch": 2.7,
"learning_rate": 7.755999283425168e-06,
"loss": 0.8565,
"step": 5850
},
{
"epoch": 2.71,
"learning_rate": 7.1738123120338e-06,
"loss": 0.7328,
"step": 5875
},
{
"epoch": 2.72,
"learning_rate": 6.613801553079184e-06,
"loss": 0.8614,
"step": 5900
},
{
"epoch": 2.73,
"learning_rate": 6.076053957825411e-06,
"loss": 0.7389,
"step": 5925
},
{
"epoch": 2.75,
"learning_rate": 5.560653020798906e-06,
"loss": 0.8642,
"step": 5950
},
{
"epoch": 2.76,
"learning_rate": 5.067678766824651e-06,
"loss": 0.7141,
"step": 5975
},
{
"epoch": 2.77,
"learning_rate": 4.597207738600911e-06,
"loss": 0.8602,
"step": 6000
}
],
"max_steps": 6498,
"num_train_epochs": 3,
"total_flos": 4.8008476405304525e+17,
"trial_name": null,
"trial_params": null
}