IF-CL-34B / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9987871437234688,
"eval_steps": 500,
"global_step": 1648,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 1.9998183053318547e-05,
"loss": 0.6109,
"step": 10
},
{
"epoch": 0.02,
"learning_rate": 1.9992732873533223e-05,
"loss": 0.5587,
"step": 20
},
{
"epoch": 0.04,
"learning_rate": 1.9983651441181253e-05,
"loss": 0.5352,
"step": 30
},
{
"epoch": 0.05,
"learning_rate": 1.997094205635831e-05,
"loss": 0.5315,
"step": 40
},
{
"epoch": 0.06,
"learning_rate": 1.99546093375193e-05,
"loss": 0.5115,
"step": 50
},
{
"epoch": 0.07,
"learning_rate": 1.99346592198001e-05,
"loss": 0.5241,
"step": 60
},
{
"epoch": 0.08,
"learning_rate": 1.9911098952860726e-05,
"loss": 0.5085,
"step": 70
},
{
"epoch": 0.1,
"learning_rate": 1.9883937098250962e-05,
"loss": 0.4903,
"step": 80
},
{
"epoch": 0.11,
"learning_rate": 1.985318352629912e-05,
"loss": 0.5028,
"step": 90
},
{
"epoch": 0.12,
"learning_rate": 1.9818849412525294e-05,
"loss": 0.5097,
"step": 100
},
{
"epoch": 0.13,
"learning_rate": 1.978094723358031e-05,
"loss": 0.5015,
"step": 110
},
{
"epoch": 0.15,
"learning_rate": 1.9739490762711812e-05,
"loss": 0.5086,
"step": 120
},
{
"epoch": 0.16,
"learning_rate": 1.969449506475924e-05,
"loss": 0.497,
"step": 130
},
{
"epoch": 0.17,
"learning_rate": 1.9645976490679402e-05,
"loss": 0.5085,
"step": 140
},
{
"epoch": 0.18,
"learning_rate": 1.9593952671604737e-05,
"loss": 0.4994,
"step": 150
},
{
"epoch": 0.19,
"learning_rate": 1.953844251243633e-05,
"loss": 0.4923,
"step": 160
},
{
"epoch": 0.21,
"learning_rate": 1.947946618497407e-05,
"loss": 0.4887,
"step": 170
},
{
"epoch": 0.22,
"learning_rate": 1.941704512058646e-05,
"loss": 0.496,
"step": 180
},
{
"epoch": 0.23,
"learning_rate": 1.9351202002422654e-05,
"loss": 0.5039,
"step": 190
},
{
"epoch": 0.24,
"learning_rate": 1.928196075716966e-05,
"loss": 0.4888,
"step": 200
},
{
"epoch": 0.25,
"learning_rate": 1.920934654635764e-05,
"loss": 0.4972,
"step": 210
},
{
"epoch": 0.27,
"learning_rate": 1.9133385757216458e-05,
"loss": 0.4968,
"step": 220
},
{
"epoch": 0.28,
"learning_rate": 1.905410599308687e-05,
"loss": 0.4952,
"step": 230
},
{
"epoch": 0.29,
"learning_rate": 1.8971536063389745e-05,
"loss": 0.4831,
"step": 240
},
{
"epoch": 0.3,
"learning_rate": 1.888570597315703e-05,
"loss": 0.4836,
"step": 250
},
{
"epoch": 0.32,
"learning_rate": 1.8796646912128247e-05,
"loss": 0.4873,
"step": 260
},
{
"epoch": 0.33,
"learning_rate": 1.8704391243416478e-05,
"loss": 0.4773,
"step": 270
},
{
"epoch": 0.34,
"learning_rate": 1.8608972491747946e-05,
"loss": 0.49,
"step": 280
},
{
"epoch": 0.35,
"learning_rate": 1.8510425331279488e-05,
"loss": 0.4892,
"step": 290
},
{
"epoch": 0.36,
"learning_rate": 1.8408785572998335e-05,
"loss": 0.4794,
"step": 300
},
{
"epoch": 0.38,
"learning_rate": 1.8304090151708797e-05,
"loss": 0.4829,
"step": 310
},
{
"epoch": 0.39,
"learning_rate": 1.8196377112610524e-05,
"loss": 0.4814,
"step": 320
},
{
"epoch": 0.4,
"learning_rate": 1.808568559747331e-05,
"loss": 0.4871,
"step": 330
},
{
"epoch": 0.41,
"learning_rate": 1.7972055830413372e-05,
"loss": 0.4824,
"step": 340
},
{
"epoch": 0.42,
"learning_rate": 1.7855529103276337e-05,
"loss": 0.483,
"step": 350
},
{
"epoch": 0.44,
"learning_rate": 1.773614776063225e-05,
"loss": 0.4701,
"step": 360
},
{
"epoch": 0.45,
"learning_rate": 1.761395518438797e-05,
"loss": 0.4859,
"step": 370
},
{
"epoch": 0.46,
"learning_rate": 1.7488995778022687e-05,
"loss": 0.4622,
"step": 380
},
{
"epoch": 0.47,
"learning_rate": 1.7361314950452137e-05,
"loss": 0.4784,
"step": 390
},
{
"epoch": 0.49,
"learning_rate": 1.7230959099527512e-05,
"loss": 0.4888,
"step": 400
},
{
"epoch": 0.5,
"learning_rate": 1.709797559517496e-05,
"loss": 0.4734,
"step": 410
},
{
"epoch": 0.51,
"learning_rate": 1.6962412762181867e-05,
"loss": 0.4801,
"step": 420
},
{
"epoch": 0.52,
"learning_rate": 1.6824319862636137e-05,
"loss": 0.4895,
"step": 430
},
{
"epoch": 0.53,
"learning_rate": 1.6683747078024887e-05,
"loss": 0.4671,
"step": 440
},
{
"epoch": 0.55,
"learning_rate": 1.654074549099901e-05,
"loss": 0.4881,
"step": 450
},
{
"epoch": 0.56,
"learning_rate": 1.6395367066810312e-05,
"loss": 0.4781,
"step": 460
},
{
"epoch": 0.57,
"learning_rate": 1.6247664634427866e-05,
"loss": 0.488,
"step": 470
},
{
"epoch": 0.58,
"learning_rate": 1.6097691867340547e-05,
"loss": 0.4708,
"step": 480
},
{
"epoch": 0.59,
"learning_rate": 1.5945503264052638e-05,
"loss": 0.4811,
"step": 490
},
{
"epoch": 0.61,
"learning_rate": 1.5791154128279694e-05,
"loss": 0.4651,
"step": 500
},
{
"epoch": 0.62,
"learning_rate": 1.5634700548851713e-05,
"loss": 0.463,
"step": 510
},
{
"epoch": 0.63,
"learning_rate": 1.547619937933108e-05,
"loss": 0.4769,
"step": 520
},
{
"epoch": 0.64,
"learning_rate": 1.53157082173526e-05,
"loss": 0.4791,
"step": 530
},
{
"epoch": 0.65,
"learning_rate": 1.5153285383693091e-05,
"loss": 0.4905,
"step": 540
},
{
"epoch": 0.67,
"learning_rate": 1.4988989901078286e-05,
"loss": 0.4607,
"step": 550
},
{
"epoch": 0.68,
"learning_rate": 1.4822881472734563e-05,
"loss": 0.4764,
"step": 560
},
{
"epoch": 0.69,
"learning_rate": 1.4655020460693452e-05,
"loss": 0.489,
"step": 570
},
{
"epoch": 0.7,
"learning_rate": 1.4485467863856704e-05,
"loss": 0.4744,
"step": 580
},
{
"epoch": 0.72,
"learning_rate": 1.4314285295829957e-05,
"loss": 0.4722,
"step": 590
},
{
"epoch": 0.73,
"learning_rate": 1.4141534962532986e-05,
"loss": 0.4588,
"step": 600
},
{
"epoch": 0.74,
"learning_rate": 1.3967279639594753e-05,
"loss": 0.4779,
"step": 610
},
{
"epoch": 0.75,
"learning_rate": 1.3791582649541404e-05,
"loss": 0.4629,
"step": 620
},
{
"epoch": 0.76,
"learning_rate": 1.3614507838785547e-05,
"loss": 0.4826,
"step": 630
},
{
"epoch": 0.78,
"learning_rate": 1.3436119554425133e-05,
"loss": 0.4615,
"step": 640
},
{
"epoch": 0.79,
"learning_rate": 1.3256482620860415e-05,
"loss": 0.476,
"step": 650
},
{
"epoch": 0.8,
"learning_rate": 1.3075662316237466e-05,
"loss": 0.4746,
"step": 660
},
{
"epoch": 0.81,
"learning_rate": 1.2893724348726757e-05,
"loss": 0.4627,
"step": 670
},
{
"epoch": 0.82,
"learning_rate": 1.2710734832645557e-05,
"loss": 0.4645,
"step": 680
},
{
"epoch": 0.84,
"learning_rate": 1.2526760264432658e-05,
"loss": 0.472,
"step": 690
},
{
"epoch": 0.85,
"learning_rate": 1.2341867498484303e-05,
"loss": 0.4653,
"step": 700
},
{
"epoch": 0.86,
"learning_rate": 1.2156123722859989e-05,
"loss": 0.4713,
"step": 710
},
{
"epoch": 0.87,
"learning_rate": 1.1969596434867063e-05,
"loss": 0.4661,
"step": 720
},
{
"epoch": 0.89,
"learning_rate": 1.1782353416532908e-05,
"loss": 0.4761,
"step": 730
},
{
"epoch": 0.9,
"learning_rate": 1.1594462709973684e-05,
"loss": 0.4617,
"step": 740
},
{
"epoch": 0.91,
"learning_rate": 1.140599259266854e-05,
"loss": 0.4595,
"step": 750
},
{
"epoch": 0.92,
"learning_rate": 1.1217011552648316e-05,
"loss": 0.4677,
"step": 760
},
{
"epoch": 0.93,
"learning_rate": 1.102758826360772e-05,
"loss": 0.4712,
"step": 770
},
{
"epoch": 0.95,
"learning_rate": 1.0837791559950029e-05,
"loss": 0.4613,
"step": 780
},
{
"epoch": 0.96,
"learning_rate": 1.0647690411773415e-05,
"loss": 0.4598,
"step": 790
},
{
"epoch": 0.97,
"learning_rate": 1.0457353899807947e-05,
"loss": 0.4803,
"step": 800
},
{
"epoch": 0.98,
"learning_rate": 1.0266851190312375e-05,
"loss": 0.4549,
"step": 810
},
{
"epoch": 0.99,
"learning_rate": 1.0076251509939867e-05,
"loss": 0.463,
"step": 820
},
{
"epoch": 1.01,
"learning_rate": 9.885624120581772e-06,
"loss": 0.4129,
"step": 830
},
{
"epoch": 1.02,
"learning_rate": 9.695038294198588e-06,
"loss": 0.3427,
"step": 840
},
{
"epoch": 1.03,
"learning_rate": 9.504563287647265e-06,
"loss": 0.3457,
"step": 850
},
{
"epoch": 1.04,
"learning_rate": 9.314268317514023e-06,
"loss": 0.3401,
"step": 860
},
{
"epoch": 1.06,
"learning_rate": 9.12422253496175e-06,
"loss": 0.3354,
"step": 870
},
{
"epoch": 1.07,
"learning_rate": 8.934495000601241e-06,
"loss": 0.3391,
"step": 880
},
{
"epoch": 1.08,
"learning_rate": 8.745154659395273e-06,
"loss": 0.3428,
"step": 890
},
{
"epoch": 1.09,
"learning_rate": 8.556270315604779e-06,
"loss": 0.3339,
"step": 900
},
{
"epoch": 1.1,
"learning_rate": 8.36791060778608e-06,
"loss": 0.328,
"step": 910
},
{
"epoch": 1.12,
"learning_rate": 8.180143983848388e-06,
"loss": 0.3328,
"step": 920
},
{
"epoch": 1.13,
"learning_rate": 7.993038676180546e-06,
"loss": 0.3288,
"step": 930
},
{
"epoch": 1.14,
"learning_rate": 7.806662676856134e-06,
"loss": 0.3327,
"step": 940
},
{
"epoch": 1.15,
"learning_rate": 7.62108371292584e-06,
"loss": 0.3421,
"step": 950
},
{
"epoch": 1.16,
"learning_rate": 7.436369221806201e-06,
"loss": 0.3462,
"step": 960
},
{
"epoch": 1.18,
"learning_rate": 7.2525863267735405e-06,
"loss": 0.3302,
"step": 970
},
{
"epoch": 1.19,
"learning_rate": 7.069801812572117e-06,
"loss": 0.3278,
"step": 980
},
{
"epoch": 1.2,
"learning_rate": 6.888082101145222e-06,
"loss": 0.3402,
"step": 990
},
{
"epoch": 1.21,
"learning_rate": 6.707493227498187e-06,
"loss": 0.344,
"step": 1000
},
{
"epoch": 1.22,
"learning_rate": 6.5281008157019425e-06,
"loss": 0.3349,
"step": 1010
},
{
"epoch": 1.24,
"learning_rate": 6.3499700550459554e-06,
"loss": 0.3379,
"step": 1020
},
{
"epoch": 1.25,
"learning_rate": 6.173165676349103e-06,
"loss": 0.3398,
"step": 1030
},
{
"epoch": 1.26,
"learning_rate": 5.99775192843722e-06,
"loss": 0.3362,
"step": 1040
},
{
"epoch": 1.27,
"learning_rate": 5.823792554795738e-06,
"loss": 0.3316,
"step": 1050
},
{
"epoch": 1.29,
"learning_rate": 5.6513507704059835e-06,
"loss": 0.3406,
"step": 1060
},
{
"epoch": 1.3,
"learning_rate": 5.480489238773536e-06,
"loss": 0.3266,
"step": 1070
},
{
"epoch": 1.31,
"learning_rate": 5.311270049156967e-06,
"loss": 0.3393,
"step": 1080
},
{
"epoch": 1.32,
"learning_rate": 5.14375469400529e-06,
"loss": 0.3286,
"step": 1090
},
{
"epoch": 1.33,
"learning_rate": 4.978004046612224e-06,
"loss": 0.3338,
"step": 1100
},
{
"epoch": 1.35,
"learning_rate": 4.814078338995516e-06,
"loss": 0.3247,
"step": 1110
},
{
"epoch": 1.36,
"learning_rate": 4.652037140009259e-06,
"loss": 0.3184,
"step": 1120
},
{
"epoch": 1.37,
"learning_rate": 4.491939333697205e-06,
"loss": 0.3297,
"step": 1130
},
{
"epoch": 1.38,
"learning_rate": 4.333843097894932e-06,
"loss": 0.3378,
"step": 1140
},
{
"epoch": 1.39,
"learning_rate": 4.177805883088641e-06,
"loss": 0.3426,
"step": 1150
},
{
"epoch": 1.41,
"learning_rate": 4.023884391538244e-06,
"loss": 0.3354,
"step": 1160
},
{
"epoch": 1.42,
"learning_rate": 3.8721345566724156e-06,
"loss": 0.3348,
"step": 1170
},
{
"epoch": 1.43,
"learning_rate": 3.722611522762917e-06,
"loss": 0.3314,
"step": 1180
},
{
"epoch": 1.44,
"learning_rate": 3.575369624885803e-06,
"loss": 0.3358,
"step": 1190
},
{
"epoch": 1.46,
"learning_rate": 3.4304623691766193e-06,
"loss": 0.3258,
"step": 1200
},
{
"epoch": 1.47,
"learning_rate": 3.287942413386841e-06,
"loss": 0.3296,
"step": 1210
},
{
"epoch": 1.48,
"learning_rate": 3.147861547748612e-06,
"loss": 0.3364,
"step": 1220
},
{
"epoch": 1.49,
"learning_rate": 3.0102706761547264e-06,
"loss": 0.3206,
"step": 1230
},
{
"epoch": 1.5,
"learning_rate": 2.875219797660681e-06,
"loss": 0.3214,
"step": 1240
},
{
"epoch": 1.52,
"learning_rate": 2.7427579883155895e-06,
"loss": 0.3307,
"step": 1250
},
{
"epoch": 1.53,
"learning_rate": 2.612933383328432e-06,
"loss": 0.33,
"step": 1260
},
{
"epoch": 1.54,
"learning_rate": 2.4857931595762406e-06,
"loss": 0.3343,
"step": 1270
},
{
"epoch": 1.55,
"learning_rate": 2.3613835184605527e-06,
"loss": 0.3258,
"step": 1280
},
{
"epoch": 1.56,
"learning_rate": 2.239749669118272e-06,
"loss": 0.3277,
"step": 1290
},
{
"epoch": 1.58,
"learning_rate": 2.1209358119931845e-06,
"loss": 0.3368,
"step": 1300
},
{
"epoch": 1.59,
"learning_rate": 2.0049851227739746e-06,
"loss": 0.3297,
"step": 1310
},
{
"epoch": 1.6,
"learning_rate": 1.891939736704641e-06,
"loss": 0.3367,
"step": 1320
},
{
"epoch": 1.61,
"learning_rate": 1.7818407332729914e-06,
"loss": 0.3344,
"step": 1330
},
{
"epoch": 1.63,
"learning_rate": 1.6747281212828193e-06,
"loss": 0.3388,
"step": 1340
},
{
"epoch": 1.64,
"learning_rate": 1.570640824315095e-06,
"loss": 0.3333,
"step": 1350
},
{
"epoch": 1.65,
"learning_rate": 1.4696166665835853e-06,
"loss": 0.3313,
"step": 1360
},
{
"epoch": 1.66,
"learning_rate": 1.3716923591899167e-06,
"loss": 0.3355,
"step": 1370
},
{
"epoch": 1.67,
"learning_rate": 1.2769034867831588e-06,
"loss": 0.3287,
"step": 1380
},
{
"epoch": 1.69,
"learning_rate": 1.1852844946287434e-06,
"loss": 0.3294,
"step": 1390
},
{
"epoch": 1.7,
"learning_rate": 1.0968686760914248e-06,
"loss": 0.3369,
"step": 1400
},
{
"epoch": 1.71,
"learning_rate": 1.0116881605368112e-06,
"loss": 0.3289,
"step": 1410
},
{
"epoch": 1.72,
"learning_rate": 9.297739016559226e-07,
"loss": 0.3242,
"step": 1420
},
{
"epoch": 1.73,
"learning_rate": 8.511556662169218e-07,
"loss": 0.3295,
"step": 1430
},
{
"epoch": 1.75,
"learning_rate": 7.758620232482083e-07,
"loss": 0.3257,
"step": 1440
},
{
"epoch": 1.76,
"learning_rate": 7.039203336567247e-07,
"loss": 0.3278,
"step": 1450
},
{
"epoch": 1.77,
"learning_rate": 6.353567402853056e-07,
"loss": 0.3199,
"step": 1460
},
{
"epoch": 1.78,
"learning_rate": 5.701961584126392e-07,
"loss": 0.3378,
"step": 1470
},
{
"epoch": 1.8,
"learning_rate": 5.084622666993244e-07,
"loss": 0.3218,
"step": 1480
},
{
"epoch": 1.81,
"learning_rate": 4.501774985832974e-07,
"loss": 0.326,
"step": 1490
},
{
"epoch": 1.82,
"learning_rate": 3.953630341277603e-07,
"loss": 0.3359,
"step": 1500
},
{
"epoch": 1.83,
"learning_rate": 3.440387923245714e-07,
"loss": 0.3304,
"step": 1510
},
{
"epoch": 1.84,
"learning_rate": 2.9622342385589256e-07,
"loss": 0.3239,
"step": 1520
},
{
"epoch": 1.86,
"learning_rate": 2.519343043167399e-07,
"loss": 0.333,
"step": 1530
},
{
"epoch": 1.87,
"learning_rate": 2.111875279008657e-07,
"loss": 0.316,
"step": 1540
},
{
"epoch": 1.88,
"learning_rate": 1.7399790155230633e-07,
"loss": 0.3345,
"step": 1550
},
{
"epoch": 1.89,
"learning_rate": 1.4037893958469994e-07,
"loss": 0.3284,
"step": 1560
},
{
"epoch": 1.9,
"learning_rate": 1.1034285877032147e-07,
"loss": 0.3235,
"step": 1570
},
{
"epoch": 1.92,
"learning_rate": 8.390057390064266e-08,
"loss": 0.3304,
"step": 1580
},
{
"epoch": 1.93,
"learning_rate": 6.10616938200137e-08,
"loss": 0.3197,
"step": 1590
},
{
"epoch": 1.94,
"learning_rate": 4.183451793390747e-08,
"loss": 0.3394,
"step": 1600
},
{
"epoch": 1.95,
"learning_rate": 2.6226033193007538e-08,
"loss": 0.3267,
"step": 1610
},
{
"epoch": 1.96,
"learning_rate": 1.424191155422583e-08,
"loss": 0.3329,
"step": 1620
},
{
"epoch": 1.98,
"learning_rate": 5.886507919570239e-09,
"loss": 0.3307,
"step": 1630
},
{
"epoch": 1.99,
"learning_rate": 1.1628585536216374e-09,
"loss": 0.3305,
"step": 1640
},
{
"epoch": 2.0,
"step": 1648,
"total_flos": 2536021641658368.0,
"train_loss": 0.40841181026500406,
"train_runtime": 72622.3244,
"train_samples_per_second": 5.811,
"train_steps_per_second": 0.023
}
],
"logging_steps": 10,
"max_steps": 1648,
"num_train_epochs": 2,
"save_steps": 500,
"total_flos": 2536021641658368.0,
"trial_name": null,
"trial_params": null
}
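
Note on reading this file: each intermediate log_history entry above carries "epoch", "learning_rate", "loss", and "step" (logged every 10 steps), and the final entry instead holds the aggregate train_* statistics for the full 2-epoch, 1648-step run. A minimal sketch, assuming this file is saved locally as trainer_state.json and that matplotlib is available (the plotting code is illustrative, not part of the original upload), of loading it and plotting training loss against global step:

import json

import matplotlib.pyplot as plt

# Load the trainer state written by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the periodic logging entries; the final summary entry
# uses "train_loss" rather than "loss" and is skipped here.
logs = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in logs]
losses = [entry["loss"] for entry in logs]

plt.plot(steps, losses)
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title("IF-CL-34B training loss (2 epochs, 1648 steps)")
plt.show()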