{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.999553770638108,
  "eval_steps": 70,
  "global_step": 280,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0178491744756805,
      "grad_norm": 1.8217403888702393,
      "learning_rate": 2.9761904761904763e-06,
      "loss": 2.7425,
      "step": 5
    },
    {
      "epoch": 0.035698348951361,
      "grad_norm": 2.104698419570923,
      "learning_rate": 5.9523809523809525e-06,
      "loss": 2.861,
      "step": 10
    },
    {
      "epoch": 0.0535475234270415,
      "grad_norm": 2.7389333248138428,
      "learning_rate": 8.92857142857143e-06,
      "loss": 2.8281,
      "step": 15
    },
    {
      "epoch": 0.071396697902722,
      "grad_norm": 3.9298207759857178,
      "learning_rate": 1.1904761904761905e-05,
      "loss": 3.1888,
      "step": 20
    },
    {
      "epoch": 0.0892458723784025,
      "grad_norm": 2.648014783859253,
      "learning_rate": 1.4880952380952381e-05,
      "loss": 2.6461,
      "step": 25
    },
    {
      "epoch": 0.107095046854083,
      "grad_norm": 1.587472915649414,
      "learning_rate": 1.785714285714286e-05,
      "loss": 2.3212,
      "step": 30
    },
    {
      "epoch": 0.1249442213297635,
      "grad_norm": 0.8390935063362122,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 1.8036,
      "step": 35
    },
    {
      "epoch": 0.142793395805444,
      "grad_norm": 0.46670979261398315,
      "learning_rate": 2.380952380952381e-05,
      "loss": 1.5552,
      "step": 40
    },
    {
      "epoch": 0.1606425702811245,
      "grad_norm": 0.45171597599983215,
      "learning_rate": 2.6785714285714288e-05,
      "loss": 1.6626,
      "step": 45
    },
    {
      "epoch": 0.178491744756805,
      "grad_norm": 0.5605499744415283,
      "learning_rate": 2.9761904761904762e-05,
      "loss": 1.4897,
      "step": 50
    },
    {
      "epoch": 0.1963409192324855,
      "grad_norm": 0.5553259253501892,
      "learning_rate": 3.273809523809524e-05,
      "loss": 1.5373,
      "step": 55
    },
    {
      "epoch": 0.214190093708166,
      "grad_norm": 0.6260251402854919,
      "learning_rate": 3.571428571428572e-05,
      "loss": 1.4779,
      "step": 60
    },
    {
      "epoch": 0.2320392681838465,
      "grad_norm": 0.6063796877861023,
      "learning_rate": 3.8690476190476195e-05,
      "loss": 1.483,
      "step": 65
    },
    {
      "epoch": 0.249888442659527,
      "grad_norm": 0.5549850463867188,
      "learning_rate": 4.166666666666667e-05,
      "loss": 1.5022,
      "step": 70
    },
    {
      "epoch": 0.249888442659527,
      "eval_loss": 1.451762318611145,
      "eval_runtime": 17.7549,
      "eval_samples_per_second": 2.591,
      "eval_steps_per_second": 2.591,
      "step": 70
    },
    {
      "epoch": 0.2677376171352075,
      "grad_norm": 0.482930988073349,
      "learning_rate": 4.464285714285715e-05,
      "loss": 1.4256,
      "step": 75
    },
    {
      "epoch": 0.285586791610888,
      "grad_norm": 0.4240593910217285,
      "learning_rate": 4.761904761904762e-05,
      "loss": 1.3655,
      "step": 80
    },
    {
      "epoch": 0.3034359660865685,
      "grad_norm": 0.4872314929962158,
      "learning_rate": 5.05952380952381e-05,
      "loss": 1.4478,
      "step": 85
    },
    {
      "epoch": 0.321285140562249,
      "grad_norm": 0.42132768034935,
      "learning_rate": 5.3571428571428575e-05,
      "loss": 1.3305,
      "step": 90
    },
    {
      "epoch": 0.3391343150379295,
      "grad_norm": 0.6932046413421631,
      "learning_rate": 5.6547619047619046e-05,
      "loss": 1.4279,
      "step": 95
    },
    {
      "epoch": 0.35698348951361,
      "grad_norm": 0.6714524626731873,
      "learning_rate": 5.9523809523809524e-05,
      "loss": 1.4967,
      "step": 100
    },
    {
      "epoch": 0.3748326639892905,
      "grad_norm": 0.5682816505432129,
      "learning_rate": 6.25e-05,
      "loss": 1.4739,
      "step": 105
    },
    {
      "epoch": 0.392681838464971,
      "grad_norm": 0.7795937657356262,
      "learning_rate": 6.547619047619048e-05,
      "loss": 1.3751,
      "step": 110
    },
    {
      "epoch": 0.4105310129406515,
      "grad_norm": 0.8056842088699341,
      "learning_rate": 6.845238095238096e-05,
      "loss": 1.3699,
      "step": 115
    },
    {
      "epoch": 0.428380187416332,
      "grad_norm": 0.8373801112174988,
      "learning_rate": 7.142857142857143e-05,
      "loss": 1.4696,
      "step": 120
    },
    {
      "epoch": 0.4462293618920125,
      "grad_norm": 1.0051416158676147,
      "learning_rate": 7.440476190476191e-05,
      "loss": 1.4059,
      "step": 125
    },
    {
      "epoch": 0.464078536367693,
      "grad_norm": 0.5304180383682251,
      "learning_rate": 7.738095238095239e-05,
      "loss": 1.3072,
      "step": 130
    },
    {
      "epoch": 0.4819277108433735,
      "grad_norm": 0.8797634243965149,
      "learning_rate": 8.035714285714287e-05,
      "loss": 1.4132,
      "step": 135
    },
    {
      "epoch": 0.499776885319054,
      "grad_norm": 0.9049625396728516,
      "learning_rate": 8.333333333333334e-05,
      "loss": 1.4121,
      "step": 140
    },
    {
      "epoch": 0.499776885319054,
      "eval_loss": 1.3727394342422485,
      "eval_runtime": 17.745,
      "eval_samples_per_second": 2.592,
      "eval_steps_per_second": 2.592,
      "step": 140
    },
    {
      "epoch": 0.5176260597947345,
      "grad_norm": 0.6793915033340454,
      "learning_rate": 8.630952380952382e-05,
      "loss": 1.3109,
      "step": 145
    },
    {
      "epoch": 0.535475234270415,
      "grad_norm": 0.7171015739440918,
      "learning_rate": 8.92857142857143e-05,
      "loss": 1.3781,
      "step": 150
    },
    {
      "epoch": 0.5533244087460955,
      "grad_norm": 0.6738716959953308,
      "learning_rate": 9.226190476190478e-05,
      "loss": 1.3564,
      "step": 155
    },
    {
      "epoch": 0.571173583221776,
      "grad_norm": 0.699975311756134,
      "learning_rate": 9.523809523809524e-05,
      "loss": 1.2387,
      "step": 160
    },
    {
      "epoch": 0.5890227576974565,
      "grad_norm": 0.7659904956817627,
      "learning_rate": 9.821428571428572e-05,
      "loss": 1.3042,
      "step": 165
    },
    {
      "epoch": 0.606871932173137,
      "grad_norm": 0.9782125353813171,
      "learning_rate": 9.999956828659095e-05,
      "loss": 1.3709,
      "step": 170
    },
    {
      "epoch": 0.6247211066488175,
      "grad_norm": 1.0532957315444946,
      "learning_rate": 9.999471159635539e-05,
      "loss": 1.3844,
      "step": 175
    },
    {
      "epoch": 0.642570281124498,
      "grad_norm": 0.7373877167701721,
      "learning_rate": 9.998445910004082e-05,
      "loss": 1.2852,
      "step": 180
    },
    {
      "epoch": 0.6604194556001785,
      "grad_norm": 1.0207768678665161,
      "learning_rate": 9.996881190417393e-05,
      "loss": 1.4652,
      "step": 185
    },
    {
      "epoch": 0.678268630075859,
      "grad_norm": 0.7943917512893677,
      "learning_rate": 9.994777169751806e-05,
      "loss": 1.3743,
      "step": 190
    },
    {
      "epoch": 0.6961178045515395,
      "grad_norm": 0.7461659908294678,
      "learning_rate": 9.992134075089084e-05,
      "loss": 1.2423,
      "step": 195
    },
    {
      "epoch": 0.71396697902722,
      "grad_norm": 0.9689913988113403,
      "learning_rate": 9.988952191691925e-05,
      "loss": 1.3113,
      "step": 200
    },
    {
      "epoch": 0.7318161535029005,
      "grad_norm": 0.766276478767395,
      "learning_rate": 9.985231862973168e-05,
      "loss": 1.3524,
      "step": 205
    },
    {
      "epoch": 0.749665327978581,
      "grad_norm": 0.6728419661521912,
      "learning_rate": 9.980973490458728e-05,
      "loss": 1.4038,
      "step": 210
    },
    {
      "epoch": 0.749665327978581,
      "eval_loss": 1.3051044940948486,
      "eval_runtime": 17.7559,
      "eval_samples_per_second": 2.591,
      "eval_steps_per_second": 2.591,
      "step": 210
    },
    {
      "epoch": 0.7675145024542614,
      "grad_norm": 1.0456575155258179,
      "learning_rate": 9.976177533744261e-05,
      "loss": 1.3626,
      "step": 215
    },
    {
      "epoch": 0.785363676929942,
      "grad_norm": 0.9017456769943237,
      "learning_rate": 9.97084451044556e-05,
      "loss": 1.3232,
      "step": 220
    },
    {
      "epoch": 0.8032128514056225,
      "grad_norm": 0.9113703966140747,
      "learning_rate": 9.964974996142698e-05,
      "loss": 1.2826,
      "step": 225
    },
    {
      "epoch": 0.821062025881303,
      "grad_norm": 0.7177279591560364,
      "learning_rate": 9.958569624317893e-05,
      "loss": 1.2794,
      "step": 230
    },
    {
      "epoch": 0.8389112003569835,
      "grad_norm": 0.9058728814125061,
      "learning_rate": 9.951629086287151e-05,
      "loss": 1.3853,
      "step": 235
    },
    {
      "epoch": 0.856760374832664,
      "grad_norm": 0.6813459992408752,
      "learning_rate": 9.944154131125642e-05,
      "loss": 1.3533,
      "step": 240
    },
    {
      "epoch": 0.8746095493083444,
      "grad_norm": 0.7113555073738098,
      "learning_rate": 9.936145565586871e-05,
      "loss": 1.3395,
      "step": 245
    },
    {
      "epoch": 0.892458723784025,
      "grad_norm": 1.243597149848938,
      "learning_rate": 9.927604254015585e-05,
      "loss": 1.443,
      "step": 250
    },
    {
      "epoch": 0.9103078982597055,
      "grad_norm": 0.8651953339576721,
      "learning_rate": 9.918531118254507e-05,
      "loss": 1.398,
      "step": 255
    },
    {
      "epoch": 0.928157072735386,
      "grad_norm": 0.8877395987510681,
      "learning_rate": 9.90892713754483e-05,
      "loss": 1.346,
      "step": 260
    },
    {
      "epoch": 0.9460062472110665,
      "grad_norm": 0.8857008814811707,
      "learning_rate": 9.898793348420536e-05,
      "loss": 1.3921,
      "step": 265
    },
    {
      "epoch": 0.963855421686747,
      "grad_norm": 0.8319969177246094,
      "learning_rate": 9.888130844596524e-05,
      "loss": 1.3838,
      "step": 270
    },
    {
      "epoch": 0.9817045961624274,
      "grad_norm": 0.7452044486999512,
      "learning_rate": 9.876940776850569e-05,
      "loss": 1.3529,
      "step": 275
    },
    {
      "epoch": 0.999553770638108,
      "grad_norm": 0.7535015940666199,
      "learning_rate": 9.865224352899119e-05,
      "loss": 1.2739,
      "step": 280
    },
    {
      "epoch": 0.999553770638108,
      "eval_loss": 1.289029836654663,
      "eval_runtime": 17.7491,
      "eval_samples_per_second": 2.592,
      "eval_steps_per_second": 2.592,
      "step": 280
    }
  ],
  "logging_steps": 5,
  "max_steps": 1680,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 70,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.950249896817459e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}