|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 3.0, |
|
"eval_steps": 500, |
|
"global_step": 1875, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.008, |
|
"grad_norm": 0.8485889434814453, |
|
"learning_rate": 0.0001999964908278481, |
|
"loss": 1.2049, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.016, |
|
"grad_norm": 0.47789862751960754, |
|
"learning_rate": 0.00019998596355767805, |
|
"loss": 0.9333, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.024, |
|
"grad_norm": 1.017558217048645, |
|
"learning_rate": 0.00019996841892833, |
|
"loss": 0.8671, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.032, |
|
"grad_norm": 0.6610977053642273, |
|
"learning_rate": 0.00019994385817114646, |
|
"loss": 0.7979, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"grad_norm": 0.6075429320335388, |
|
"learning_rate": 0.00019991228300988585, |
|
"loss": 0.7662, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.048, |
|
"grad_norm": 0.6595763564109802, |
|
"learning_rate": 0.00019987369566060176, |
|
"loss": 0.7929, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.056, |
|
"grad_norm": 0.6968618035316467, |
|
"learning_rate": 0.00019982809883148722, |
|
"loss": 0.7683, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.064, |
|
"grad_norm": 0.4889592230319977, |
|
"learning_rate": 0.00019977549572268468, |
|
"loss": 0.8667, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.072, |
|
"grad_norm": 0.6651108264923096, |
|
"learning_rate": 0.0001997158900260614, |
|
"loss": 0.8446, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"grad_norm": 0.5898510217666626, |
|
"learning_rate": 0.00019964928592495045, |
|
"loss": 0.9051, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.088, |
|
"grad_norm": 0.4398016035556793, |
|
"learning_rate": 0.00019957568809385694, |
|
"loss": 0.7235, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.096, |
|
"grad_norm": 0.6901968121528625, |
|
"learning_rate": 0.00019949510169813003, |
|
"loss": 0.8169, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.104, |
|
"grad_norm": 0.6267213225364685, |
|
"learning_rate": 0.00019940753239360047, |
|
"loss": 0.8266, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.112, |
|
"grad_norm": 0.48524895310401917, |
|
"learning_rate": 0.00019931298632618356, |
|
"loss": 0.758, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"grad_norm": 0.5294132232666016, |
|
"learning_rate": 0.0001992114701314478, |
|
"loss": 0.7759, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.128, |
|
"grad_norm": 0.48957982659339905, |
|
"learning_rate": 0.0001991029909341493, |
|
"loss": 0.7797, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.136, |
|
"grad_norm": 0.645412802696228, |
|
"learning_rate": 0.00019898755634773158, |
|
"loss": 0.7437, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.144, |
|
"grad_norm": 0.43297675251960754, |
|
"learning_rate": 0.0001988651744737914, |
|
"loss": 0.8043, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.152, |
|
"grad_norm": 0.5513920783996582, |
|
"learning_rate": 0.00019873585390151003, |
|
"loss": 0.7701, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"grad_norm": 0.8462435007095337, |
|
"learning_rate": 0.0001985996037070505, |
|
"loss": 0.709, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.168, |
|
"grad_norm": 0.6892585158348083, |
|
"learning_rate": 0.00019845643345292054, |
|
"loss": 0.7377, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.176, |
|
"grad_norm": 0.4617864191532135, |
|
"learning_rate": 0.00019830635318730154, |
|
"loss": 0.8352, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.184, |
|
"grad_norm": 0.6300354599952698, |
|
"learning_rate": 0.0001981493734433433, |
|
"loss": 0.7738, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.192, |
|
"grad_norm": 0.8086859583854675, |
|
"learning_rate": 0.0001979855052384247, |
|
"loss": 0.8067, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"grad_norm": 0.6272985935211182, |
|
"learning_rate": 0.00019781476007338058, |
|
"loss": 0.7456, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.208, |
|
"grad_norm": 0.44750839471817017, |
|
"learning_rate": 0.00019763714993169452, |
|
"loss": 0.758, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.216, |
|
"grad_norm": 0.5053977370262146, |
|
"learning_rate": 0.00019745268727865774, |
|
"loss": 0.7895, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.224, |
|
"grad_norm": 0.41920769214630127, |
|
"learning_rate": 0.00019726138506049438, |
|
"loss": 0.7302, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.232, |
|
"grad_norm": 0.38280290365219116, |
|
"learning_rate": 0.00019706325670345275, |
|
"loss": 0.8152, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"grad_norm": 0.554710865020752, |
|
"learning_rate": 0.0001968583161128631, |
|
"loss": 0.8461, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.248, |
|
"grad_norm": 0.5612509250640869, |
|
"learning_rate": 0.00019664657767216176, |
|
"loss": 0.7787, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.256, |
|
"grad_norm": 0.610614538192749, |
|
"learning_rate": 0.00019642805624188147, |
|
"loss": 0.7574, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.264, |
|
"grad_norm": 0.679517924785614, |
|
"learning_rate": 0.0001962027671586086, |
|
"loss": 0.8487, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.272, |
|
"grad_norm": 0.6685434579849243, |
|
"learning_rate": 0.00019597072623390668, |
|
"loss": 0.6611, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"grad_norm": 0.480293869972229, |
|
"learning_rate": 0.00019573194975320673, |
|
"loss": 0.7802, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.288, |
|
"grad_norm": 0.7727369070053101, |
|
"learning_rate": 0.00019548645447466431, |
|
"loss": 0.6727, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.296, |
|
"grad_norm": 0.6371043920516968, |
|
"learning_rate": 0.00019523425762798329, |
|
"loss": 0.7502, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.304, |
|
"grad_norm": 0.6399966478347778, |
|
"learning_rate": 0.00019497537691320668, |
|
"loss": 0.8401, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.312, |
|
"grad_norm": 0.7263137698173523, |
|
"learning_rate": 0.00019470983049947444, |
|
"loss": 0.7494, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"grad_norm": 0.402416467666626, |
|
"learning_rate": 0.00019443763702374812, |
|
"loss": 0.7842, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.328, |
|
"grad_norm": 0.6639626026153564, |
|
"learning_rate": 0.00019415881558950302, |
|
"loss": 0.8082, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.336, |
|
"grad_norm": 0.5801042914390564, |
|
"learning_rate": 0.00019387338576538744, |
|
"loss": 0.7883, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.344, |
|
"grad_norm": 0.5533607006072998, |
|
"learning_rate": 0.00019358136758384912, |
|
"loss": 0.7356, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.352, |
|
"grad_norm": 0.6019654273986816, |
|
"learning_rate": 0.00019328278153972947, |
|
"loss": 0.7891, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"grad_norm": 0.5344104170799255, |
|
"learning_rate": 0.00019297764858882514, |
|
"loss": 0.7671, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.368, |
|
"grad_norm": 0.5494843125343323, |
|
"learning_rate": 0.0001926659901464172, |
|
"loss": 0.6608, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.376, |
|
"grad_norm": 0.465420126914978, |
|
"learning_rate": 0.00019234782808576824, |
|
"loss": 0.647, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.384, |
|
"grad_norm": 0.5202775001525879, |
|
"learning_rate": 0.00019202318473658705, |
|
"loss": 0.729, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.392, |
|
"grad_norm": 0.5757818222045898, |
|
"learning_rate": 0.00019169208288346166, |
|
"loss": 0.6713, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"grad_norm": 0.46555572748184204, |
|
"learning_rate": 0.0001913545457642601, |
|
"loss": 0.7049, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.408, |
|
"grad_norm": 0.5101790428161621, |
|
"learning_rate": 0.00019101059706849957, |
|
"loss": 0.7419, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.416, |
|
"grad_norm": 0.6083744764328003, |
|
"learning_rate": 0.00019066026093568378, |
|
"loss": 0.7148, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.424, |
|
"grad_norm": 0.4719640612602234, |
|
"learning_rate": 0.00019030356195360874, |
|
"loss": 0.7493, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.432, |
|
"grad_norm": 0.7365225553512573, |
|
"learning_rate": 0.0001899405251566371, |
|
"loss": 0.7652, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"grad_norm": 0.4452705383300781, |
|
"learning_rate": 0.0001895711760239413, |
|
"loss": 0.7438, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.448, |
|
"grad_norm": 0.6071786284446716, |
|
"learning_rate": 0.0001891955404777151, |
|
"loss": 0.7683, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.456, |
|
"grad_norm": 0.5774498581886292, |
|
"learning_rate": 0.00018881364488135448, |
|
"loss": 0.8115, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.464, |
|
"grad_norm": 0.6134682893753052, |
|
"learning_rate": 0.00018842551603760724, |
|
"loss": 0.8335, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.472, |
|
"grad_norm": 0.4869893193244934, |
|
"learning_rate": 0.00018803118118669202, |
|
"loss": 0.6933, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"grad_norm": 0.6457111239433289, |
|
"learning_rate": 0.00018763066800438636, |
|
"loss": 0.7515, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.488, |
|
"grad_norm": 0.59674471616745, |
|
"learning_rate": 0.0001872240046000844, |
|
"loss": 0.6931, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 0.496, |
|
"grad_norm": 0.44608160853385925, |
|
"learning_rate": 0.00018681121951482393, |
|
"loss": 0.782, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.504, |
|
"grad_norm": 0.5934664607048035, |
|
"learning_rate": 0.00018639234171928353, |
|
"loss": 0.7361, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 0.512, |
|
"grad_norm": 0.49716323614120483, |
|
"learning_rate": 0.0001859674006117491, |
|
"loss": 0.7443, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"grad_norm": 0.47995495796203613, |
|
"learning_rate": 0.00018553642601605068, |
|
"loss": 0.7221, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 0.528, |
|
"grad_norm": 0.5177399516105652, |
|
"learning_rate": 0.00018509944817946922, |
|
"loss": 0.7622, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.536, |
|
"grad_norm": 0.6638798713684082, |
|
"learning_rate": 0.0001846564977706138, |
|
"loss": 0.8556, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 0.544, |
|
"grad_norm": 0.5056771636009216, |
|
"learning_rate": 0.00018420760587726923, |
|
"loss": 0.7814, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.552, |
|
"grad_norm": 0.44543707370758057, |
|
"learning_rate": 0.0001837528040042142, |
|
"loss": 0.722, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"grad_norm": 0.6765120625495911, |
|
"learning_rate": 0.00018329212407100994, |
|
"loss": 0.7903, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.568, |
|
"grad_norm": 0.49232372641563416, |
|
"learning_rate": 0.00018282559840976042, |
|
"loss": 0.6996, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 0.576, |
|
"grad_norm": 0.47392791509628296, |
|
"learning_rate": 0.00018235325976284275, |
|
"loss": 0.773, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.584, |
|
"grad_norm": 0.5056615471839905, |
|
"learning_rate": 0.00018187514128060946, |
|
"loss": 0.728, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 0.592, |
|
"grad_norm": 0.5857616662979126, |
|
"learning_rate": 0.00018139127651906184, |
|
"loss": 0.7659, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"grad_norm": 0.5966864228248596, |
|
"learning_rate": 0.00018090169943749476, |
|
"loss": 0.7039, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.608, |
|
"grad_norm": 0.4524347484111786, |
|
"learning_rate": 0.00018040644439611348, |
|
"loss": 0.7125, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.616, |
|
"grad_norm": 0.5570976138114929, |
|
"learning_rate": 0.00017990554615362198, |
|
"loss": 0.698, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 0.624, |
|
"grad_norm": 0.6045777201652527, |
|
"learning_rate": 0.00017939903986478355, |
|
"loss": 0.8255, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.632, |
|
"grad_norm": 0.6149687767028809, |
|
"learning_rate": 0.00017888696107795342, |
|
"loss": 0.6616, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"grad_norm": 0.4873579144477844, |
|
"learning_rate": 0.000178369345732584, |
|
"loss": 0.7452, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.648, |
|
"grad_norm": 0.5569061636924744, |
|
"learning_rate": 0.00017784623015670238, |
|
"loss": 0.7652, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 0.656, |
|
"grad_norm": 0.5825181603431702, |
|
"learning_rate": 0.00017731765106436073, |
|
"loss": 0.7793, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.664, |
|
"grad_norm": 0.4047383666038513, |
|
"learning_rate": 0.00017678364555305978, |
|
"loss": 0.6875, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 0.672, |
|
"grad_norm": 0.5080836415290833, |
|
"learning_rate": 0.0001762442511011448, |
|
"loss": 0.7465, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"grad_norm": 0.5825940370559692, |
|
"learning_rate": 0.00017569950556517566, |
|
"loss": 0.7205, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 0.688, |
|
"grad_norm": 0.476992666721344, |
|
"learning_rate": 0.00017514944717726962, |
|
"loss": 0.6589, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.696, |
|
"grad_norm": 0.7424727082252502, |
|
"learning_rate": 0.00017459411454241822, |
|
"loss": 0.7035, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 0.704, |
|
"grad_norm": 0.6544787287712097, |
|
"learning_rate": 0.00017403354663577783, |
|
"loss": 0.787, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.712, |
|
"grad_norm": 0.49425187706947327, |
|
"learning_rate": 0.00017346778279993415, |
|
"loss": 0.7515, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 0.5473236441612244, |
|
"learning_rate": 0.00017289686274214118, |
|
"loss": 0.7199, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.728, |
|
"grad_norm": 0.6773544549942017, |
|
"learning_rate": 0.00017232082653153422, |
|
"loss": 0.8037, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 0.736, |
|
"grad_norm": 0.6355096101760864, |
|
"learning_rate": 0.00017173971459631787, |
|
"loss": 0.7502, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.744, |
|
"grad_norm": 0.47867000102996826, |
|
"learning_rate": 0.00017115356772092857, |
|
"loss": 0.7446, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 0.752, |
|
"grad_norm": 0.5135357975959778, |
|
"learning_rate": 0.0001705624270431721, |
|
"loss": 0.6507, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 0.48866042494773865, |
|
"learning_rate": 0.00016996633405133655, |
|
"loss": 0.7164, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 0.768, |
|
"grad_norm": 0.5892354249954224, |
|
"learning_rate": 0.0001693653305812805, |
|
"loss": 0.7621, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.776, |
|
"grad_norm": 0.6633970141410828, |
|
"learning_rate": 0.00016875945881349676, |
|
"loss": 0.7623, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 0.784, |
|
"grad_norm": 0.6444060802459717, |
|
"learning_rate": 0.000168148761270152, |
|
"loss": 0.6606, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.792, |
|
"grad_norm": 0.7012648582458496, |
|
"learning_rate": 0.00016753328081210245, |
|
"loss": 0.6941, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.7064160704612732, |
|
"learning_rate": 0.00016691306063588583, |
|
"loss": 0.6841, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.808, |
|
"grad_norm": 0.7241398096084595, |
|
"learning_rate": 0.00016628814427068953, |
|
"loss": 0.6996, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 0.816, |
|
"grad_norm": 0.7807374596595764, |
|
"learning_rate": 0.00016565857557529566, |
|
"loss": 0.7542, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.824, |
|
"grad_norm": 0.763768196105957, |
|
"learning_rate": 0.00016502439873500289, |
|
"loss": 0.7175, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 0.832, |
|
"grad_norm": 0.6105090379714966, |
|
"learning_rate": 0.0001643856582585254, |
|
"loss": 0.7565, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 0.5686540603637695, |
|
"learning_rate": 0.000163742398974869, |
|
"loss": 0.7339, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 0.848, |
|
"grad_norm": 0.5341500043869019, |
|
"learning_rate": 0.00016309466603018496, |
|
"loss": 0.569, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.856, |
|
"grad_norm": 0.7274748682975769, |
|
"learning_rate": 0.00016244250488460158, |
|
"loss": 0.7556, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 0.864, |
|
"grad_norm": 0.7321165204048157, |
|
"learning_rate": 0.00016178596130903344, |
|
"loss": 0.7084, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.872, |
|
"grad_norm": 0.5086159110069275, |
|
"learning_rate": 0.00016112508138196917, |
|
"loss": 0.6935, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 0.4714389443397522, |
|
"learning_rate": 0.0001604599114862375, |
|
"loss": 0.7076, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.888, |
|
"grad_norm": 0.5031452178955078, |
|
"learning_rate": 0.0001597904983057519, |
|
"loss": 0.7151, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 0.896, |
|
"grad_norm": 0.7745943665504456, |
|
"learning_rate": 0.0001591168888222342, |
|
"loss": 0.7001, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.904, |
|
"grad_norm": 0.6076303124427795, |
|
"learning_rate": 0.00015843913031191723, |
|
"loss": 0.7285, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 0.912, |
|
"grad_norm": 0.7456529140472412, |
|
"learning_rate": 0.00015775727034222675, |
|
"loss": 0.8041, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"grad_norm": 0.5760998725891113, |
|
"learning_rate": 0.0001570713567684432, |
|
"loss": 0.7353, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 0.928, |
|
"grad_norm": 0.7057327032089233, |
|
"learning_rate": 0.00015638143773034267, |
|
"loss": 0.7792, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 0.936, |
|
"grad_norm": 0.7615967392921448, |
|
"learning_rate": 0.00015568756164881882, |
|
"loss": 1.0121, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 0.944, |
|
"grad_norm": 0.6304950714111328, |
|
"learning_rate": 0.000154989777222484, |
|
"loss": 0.7727, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 0.952, |
|
"grad_norm": 0.6852543950080872, |
|
"learning_rate": 0.00015428813342425177, |
|
"loss": 0.741, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.6379660964012146, |
|
"learning_rate": 0.00015358267949789966, |
|
"loss": 0.6919, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.968, |
|
"grad_norm": 0.5846463441848755, |
|
"learning_rate": 0.00015287346495461315, |
|
"loss": 0.7163, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 0.976, |
|
"grad_norm": 0.5999557971954346, |
|
"learning_rate": 0.0001521605395695108, |
|
"loss": 0.8152, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 0.984, |
|
"grad_norm": 0.5806307196617126, |
|
"learning_rate": 0.00015144395337815064, |
|
"loss": 0.6709, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 0.992, |
|
"grad_norm": 0.6559942960739136, |
|
"learning_rate": 0.00015072375667301893, |
|
"loss": 0.6527, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"grad_norm": 0.6287715435028076, |
|
"learning_rate": 0.00015000000000000001, |
|
"loss": 0.8194, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 1.008, |
|
"grad_norm": 0.616222620010376, |
|
"learning_rate": 0.00014927273415482915, |
|
"loss": 0.6627, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 1.016, |
|
"grad_norm": 0.4750412106513977, |
|
"learning_rate": 0.0001485420101795274, |
|
"loss": 0.6366, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 1.024, |
|
"grad_norm": 0.5122964978218079, |
|
"learning_rate": 0.00014780787935881923, |
|
"loss": 0.6717, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 1.032, |
|
"grad_norm": 0.7382633090019226, |
|
"learning_rate": 0.0001470703932165333, |
|
"loss": 0.6483, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 1.04, |
|
"grad_norm": 0.6540554761886597, |
|
"learning_rate": 0.00014632960351198618, |
|
"loss": 0.6151, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 1.048, |
|
"grad_norm": 0.4776591956615448, |
|
"learning_rate": 0.00014558556223635003, |
|
"loss": 0.6707, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 1.056, |
|
"grad_norm": 0.8012662529945374, |
|
"learning_rate": 0.00014483832160900326, |
|
"loss": 0.6125, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 1.064, |
|
"grad_norm": 0.6735953092575073, |
|
"learning_rate": 0.00014408793407386588, |
|
"loss": 0.6206, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 1.072, |
|
"grad_norm": 0.5640230774879456, |
|
"learning_rate": 0.00014333445229571873, |
|
"loss": 0.6161, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 1.08, |
|
"grad_norm": 0.5928654074668884, |
|
"learning_rate": 0.00014257792915650728, |
|
"loss": 0.6583, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 1.088, |
|
"grad_norm": 0.7347397208213806, |
|
"learning_rate": 0.00014181841775163013, |
|
"loss": 0.6222, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 1.096, |
|
"grad_norm": 0.593773365020752, |
|
"learning_rate": 0.0001410559713862128, |
|
"loss": 0.716, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 1.104, |
|
"grad_norm": 0.6244611144065857, |
|
"learning_rate": 0.00014029064357136628, |
|
"loss": 0.6198, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 1.112, |
|
"grad_norm": 0.5083370804786682, |
|
"learning_rate": 0.00013952248802043165, |
|
"loss": 0.6389, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"grad_norm": 0.5241413116455078, |
|
"learning_rate": 0.0001387515586452103, |
|
"loss": 0.6842, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 1.1280000000000001, |
|
"grad_norm": 0.524029016494751, |
|
"learning_rate": 0.00013797790955218014, |
|
"loss": 0.6071, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 1.1360000000000001, |
|
"grad_norm": 0.5097878575325012, |
|
"learning_rate": 0.00013720159503869815, |
|
"loss": 0.5915, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 1.144, |
|
"grad_norm": 0.5782963037490845, |
|
"learning_rate": 0.00013642266958918984, |
|
"loss": 0.6794, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 1.152, |
|
"grad_norm": 0.6088266372680664, |
|
"learning_rate": 0.00013564118787132506, |
|
"loss": 0.6773, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 1.16, |
|
"grad_norm": 0.7768995761871338, |
|
"learning_rate": 0.00013485720473218154, |
|
"loss": 0.668, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 1.168, |
|
"grad_norm": 0.6645551919937134, |
|
"learning_rate": 0.0001340707751943952, |
|
"loss": 0.6997, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 1.176, |
|
"grad_norm": 0.9228842258453369, |
|
"learning_rate": 0.00013328195445229868, |
|
"loss": 0.831, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 1.184, |
|
"grad_norm": 0.7556049823760986, |
|
"learning_rate": 0.00013249079786804765, |
|
"loss": 0.6378, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 1.192, |
|
"grad_norm": 0.832775354385376, |
|
"learning_rate": 0.0001316973609677352, |
|
"loss": 0.6547, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 1.2, |
|
"grad_norm": 0.7329304814338684, |
|
"learning_rate": 0.00013090169943749476, |
|
"loss": 0.5808, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 1.208, |
|
"grad_norm": 0.7193475961685181, |
|
"learning_rate": 0.00013010386911959206, |
|
"loss": 0.5582, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 1.216, |
|
"grad_norm": 0.6274734735488892, |
|
"learning_rate": 0.00012930392600850573, |
|
"loss": 0.5801, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 1.224, |
|
"grad_norm": 0.6485865712165833, |
|
"learning_rate": 0.0001285019262469976, |
|
"loss": 0.65, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 1.232, |
|
"grad_norm": 0.7164427042007446, |
|
"learning_rate": 0.00012769792612217224, |
|
"loss": 0.6627, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 1.24, |
|
"grad_norm": 0.600775957107544, |
|
"learning_rate": 0.00012689198206152657, |
|
"loss": 0.5603, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 1.248, |
|
"grad_norm": 0.8377975225448608, |
|
"learning_rate": 0.00012608415062898972, |
|
"loss": 0.6525, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 1.256, |
|
"grad_norm": 0.8069924116134644, |
|
"learning_rate": 0.00012527448852095295, |
|
"loss": 0.6731, |
|
"step": 785 |
|
}, |
|
{ |
|
"epoch": 1.264, |
|
"grad_norm": 0.6501213908195496, |
|
"learning_rate": 0.00012446305256229073, |
|
"loss": 0.6255, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 1.272, |
|
"grad_norm": 0.62812340259552, |
|
"learning_rate": 0.00012364989970237248, |
|
"loss": 0.6585, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"grad_norm": 0.5702307820320129, |
|
"learning_rate": 0.00012283508701106557, |
|
"loss": 0.5996, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 1.288, |
|
"grad_norm": 0.6311281323432922, |
|
"learning_rate": 0.00012201867167473015, |
|
"loss": 0.6355, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 1.296, |
|
"grad_norm": 0.5885419249534607, |
|
"learning_rate": 0.00012120071099220549, |
|
"loss": 0.6615, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 1.304, |
|
"grad_norm": 0.5239307284355164, |
|
"learning_rate": 0.0001203812623707885, |
|
"loss": 0.6096, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 1.312, |
|
"grad_norm": 0.6101869940757751, |
|
"learning_rate": 0.00011956038332220483, |
|
"loss": 0.5984, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 1.32, |
|
"grad_norm": 0.4395413100719452, |
|
"learning_rate": 0.00011873813145857249, |
|
"loss": 0.5569, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 1.328, |
|
"grad_norm": 0.8984820246696472, |
|
"learning_rate": 0.00011791456448835825, |
|
"loss": 0.7088, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 1.336, |
|
"grad_norm": 0.7709664106369019, |
|
"learning_rate": 0.00011708974021232769, |
|
"loss": 0.6731, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 1.3439999999999999, |
|
"grad_norm": 0.6782217025756836, |
|
"learning_rate": 0.00011626371651948838, |
|
"loss": 0.6188, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 1.3519999999999999, |
|
"grad_norm": 0.6427358984947205, |
|
"learning_rate": 0.00011543655138302714, |
|
"loss": 0.7004, |
|
"step": 845 |
|
}, |
|
{ |
|
"epoch": 1.3599999999999999, |
|
"grad_norm": 0.5902594923973083, |
|
"learning_rate": 0.00011460830285624118, |
|
"loss": 0.5884, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 1.3679999999999999, |
|
"grad_norm": 0.5935835838317871, |
|
"learning_rate": 0.0001137790290684638, |
|
"loss": 0.5739, |
|
"step": 855 |
|
}, |
|
{ |
|
"epoch": 1.376, |
|
"grad_norm": 0.6752728223800659, |
|
"learning_rate": 0.00011294878822098469, |
|
"loss": 0.6435, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 1.384, |
|
"grad_norm": 0.7927135825157166, |
|
"learning_rate": 0.00011211763858296507, |
|
"loss": 0.6897, |
|
"step": 865 |
|
}, |
|
{ |
|
"epoch": 1.392, |
|
"grad_norm": 0.714499294757843, |
|
"learning_rate": 0.00011128563848734816, |
|
"loss": 0.6641, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"grad_norm": 0.7086356282234192, |
|
"learning_rate": 0.00011045284632676536, |
|
"loss": 0.6273, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 1.408, |
|
"grad_norm": 0.6125518679618835, |
|
"learning_rate": 0.00010961932054943778, |
|
"loss": 0.6437, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 1.416, |
|
"grad_norm": 0.5635287165641785, |
|
"learning_rate": 0.00010878511965507434, |
|
"loss": 0.6345, |
|
"step": 885 |
|
}, |
|
{ |
|
"epoch": 1.424, |
|
"grad_norm": 0.47936007380485535, |
|
"learning_rate": 0.00010795030219076599, |
|
"loss": 0.5913, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 1.432, |
|
"grad_norm": 0.7142558097839355, |
|
"learning_rate": 0.00010711492674687671, |
|
"loss": 0.6482, |
|
"step": 895 |
|
}, |
|
{ |
|
"epoch": 1.44, |
|
"grad_norm": 0.5252729058265686, |
|
"learning_rate": 0.00010627905195293135, |
|
"loss": 0.6165, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 1.448, |
|
"grad_norm": 0.896318793296814, |
|
"learning_rate": 0.00010544273647350092, |
|
"loss": 0.634, |
|
"step": 905 |
|
}, |
|
{ |
|
"epoch": 1.456, |
|
"grad_norm": 0.6029036045074463, |
|
"learning_rate": 0.00010460603900408523, |
|
"loss": 0.6509, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 1.464, |
|
"grad_norm": 0.6835671663284302, |
|
"learning_rate": 0.00010376901826699348, |
|
"loss": 0.6212, |
|
"step": 915 |
|
}, |
|
{ |
|
"epoch": 1.472, |
|
"grad_norm": 0.7098750472068787, |
|
"learning_rate": 0.00010293173300722285, |
|
"loss": 0.7305, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 1.48, |
|
"grad_norm": 0.675316333770752, |
|
"learning_rate": 0.0001020942419883357, |
|
"loss": 0.6685, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 1.488, |
|
"grad_norm": 0.6675406098365784, |
|
"learning_rate": 0.00010125660398833528, |
|
"loss": 0.6214, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 1.496, |
|
"grad_norm": 0.6629154682159424, |
|
"learning_rate": 0.0001004188777955404, |
|
"loss": 0.6035, |
|
"step": 935 |
|
}, |
|
{ |
|
"epoch": 1.504, |
|
"grad_norm": 0.7732692360877991, |
|
"learning_rate": 9.958112220445963e-05, |
|
"loss": 0.5868, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 1.512, |
|
"grad_norm": 0.6238484978675842, |
|
"learning_rate": 9.874339601166473e-05, |
|
"loss": 0.6003, |
|
"step": 945 |
|
}, |
|
{ |
|
"epoch": 1.52, |
|
"grad_norm": 0.6622412800788879, |
|
"learning_rate": 9.790575801166432e-05, |
|
"loss": 0.5854, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 1.528, |
|
"grad_norm": 0.7598085999488831, |
|
"learning_rate": 9.706826699277718e-05, |
|
"loss": 0.5882, |
|
"step": 955 |
|
}, |
|
{ |
|
"epoch": 1.536, |
|
"grad_norm": 0.8870390057563782, |
|
"learning_rate": 9.623098173300654e-05, |
|
"loss": 0.7187, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 1.544, |
|
"grad_norm": 0.702232837677002, |
|
"learning_rate": 9.539396099591476e-05, |
|
"loss": 0.6156, |
|
"step": 965 |
|
}, |
|
{ |
|
"epoch": 1.552, |
|
"grad_norm": 0.694139301776886, |
|
"learning_rate": 9.455726352649911e-05, |
|
"loss": 0.6488, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 1.56, |
|
"grad_norm": 0.5684956312179565, |
|
"learning_rate": 9.372094804706867e-05, |
|
"loss": 0.6601, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 1.568, |
|
"grad_norm": 0.6327118277549744, |
|
"learning_rate": 9.288507325312335e-05, |
|
"loss": 0.5968, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 1.576, |
|
"grad_norm": 0.5732144117355347, |
|
"learning_rate": 9.204969780923403e-05, |
|
"loss": 0.7034, |
|
"step": 985 |
|
}, |
|
{ |
|
"epoch": 1.584, |
|
"grad_norm": 0.7025273442268372, |
|
"learning_rate": 9.121488034492569e-05, |
|
"loss": 0.5973, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 1.592, |
|
"grad_norm": 0.9270740151405334, |
|
"learning_rate": 9.038067945056227e-05, |
|
"loss": 0.7877, |
|
"step": 995 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"grad_norm": 0.6676818132400513, |
|
"learning_rate": 8.954715367323468e-05, |
|
"loss": 0.644, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 1.608, |
|
"grad_norm": 0.6687547564506531, |
|
"learning_rate": 8.871436151265184e-05, |
|
"loss": 0.6678, |
|
"step": 1005 |
|
}, |
|
{ |
|
"epoch": 1.616, |
|
"grad_norm": 0.7400322556495667, |
|
"learning_rate": 8.788236141703498e-05, |
|
"loss": 0.6088, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 1.624, |
|
"grad_norm": 0.5504963994026184, |
|
"learning_rate": 8.705121177901532e-05, |
|
"loss": 0.6219, |
|
"step": 1015 |
|
}, |
|
{ |
|
"epoch": 1.6320000000000001, |
|
"grad_norm": 0.8088738322257996, |
|
"learning_rate": 8.62209709315362e-05, |
|
"loss": 0.6698, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 1.6400000000000001, |
|
"grad_norm": 0.7380816340446472, |
|
"learning_rate": 8.539169714375885e-05, |
|
"loss": 0.6207, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 1.6480000000000001, |
|
"grad_norm": 0.6346850395202637, |
|
"learning_rate": 8.456344861697289e-05, |
|
"loss": 0.626, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 1.6560000000000001, |
|
"grad_norm": 0.49918702244758606, |
|
"learning_rate": 8.373628348051165e-05, |
|
"loss": 0.6972, |
|
"step": 1035 |
|
}, |
|
{ |
|
"epoch": 1.6640000000000001, |
|
"grad_norm": 0.7200607657432556, |
|
"learning_rate": 8.291025978767235e-05, |
|
"loss": 0.6282, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 1.6720000000000002, |
|
"grad_norm": 0.8350688815116882, |
|
"learning_rate": 8.208543551164178e-05, |
|
"loss": 0.6219, |
|
"step": 1045 |
|
}, |
|
{ |
|
"epoch": 1.6800000000000002, |
|
"grad_norm": 0.5957468748092651, |
|
"learning_rate": 8.126186854142752e-05, |
|
"loss": 0.622, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 1.688, |
|
"grad_norm": 0.7494757175445557, |
|
"learning_rate": 8.04396166777952e-05, |
|
"loss": 0.5801, |
|
"step": 1055 |
|
}, |
|
{ |
|
"epoch": 1.696, |
|
"grad_norm": 0.7109113931655884, |
|
"learning_rate": 7.961873762921153e-05, |
|
"loss": 0.598, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 1.704, |
|
"grad_norm": 0.7941219210624695, |
|
"learning_rate": 7.879928900779456e-05, |
|
"loss": 0.699, |
|
"step": 1065 |
|
}, |
|
{ |
|
"epoch": 1.712, |
|
"grad_norm": 0.7282152771949768, |
|
"learning_rate": 7.798132832526986e-05, |
|
"loss": 0.5882, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 1.72, |
|
"grad_norm": 0.5923735499382019, |
|
"learning_rate": 7.716491298893442e-05, |
|
"loss": 0.5321, |
|
"step": 1075 |
|
}, |
|
{ |
|
"epoch": 1.728, |
|
"grad_norm": 0.5571523308753967, |
|
"learning_rate": 7.635010029762756e-05, |
|
"loss": 0.6647, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 1.736, |
|
"grad_norm": 0.6043043732643127, |
|
"learning_rate": 7.553694743770928e-05, |
|
"loss": 0.628, |
|
"step": 1085 |
|
}, |
|
{ |
|
"epoch": 1.744, |
|
"grad_norm": 0.8733720183372498, |
|
"learning_rate": 7.472551147904708e-05, |
|
"loss": 0.6262, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 1.752, |
|
"grad_norm": 0.5561848878860474, |
|
"learning_rate": 7.391584937101033e-05, |
|
"loss": 0.6131, |
|
"step": 1095 |
|
}, |
|
{ |
|
"epoch": 1.76, |
|
"grad_norm": 0.6611356735229492, |
|
"learning_rate": 7.310801793847344e-05, |
|
"loss": 0.6494, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 1.768, |
|
"grad_norm": 0.5700333714485168, |
|
"learning_rate": 7.230207387782776e-05, |
|
"loss": 0.5514, |
|
"step": 1105 |
|
}, |
|
{ |
|
"epoch": 1.776, |
|
"grad_norm": 0.705662727355957, |
|
"learning_rate": 7.149807375300239e-05, |
|
"loss": 0.5823, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 1.784, |
|
"grad_norm": 0.9632449150085449, |
|
"learning_rate": 7.069607399149428e-05, |
|
"loss": 0.7207, |
|
"step": 1115 |
|
}, |
|
{ |
|
"epoch": 1.792, |
|
"grad_norm": 0.6412242650985718, |
|
"learning_rate": 6.989613088040796e-05, |
|
"loss": 0.7006, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 1.8, |
|
"grad_norm": 0.538044273853302, |
|
"learning_rate": 6.909830056250527e-05, |
|
"loss": 0.6222, |
|
"step": 1125 |
|
}, |
|
{ |
|
"epoch": 1.808, |
|
"grad_norm": 0.6124762296676636, |
|
"learning_rate": 6.830263903226483e-05, |
|
"loss": 0.6569, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 1.8159999999999998, |
|
"grad_norm": 0.7250857949256897, |
|
"learning_rate": 6.750920213195238e-05, |
|
"loss": 0.543, |
|
"step": 1135 |
|
}, |
|
{ |
|
"epoch": 1.8239999999999998, |
|
"grad_norm": 0.6840488910675049, |
|
"learning_rate": 6.671804554770135e-05, |
|
"loss": 0.6334, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 1.8319999999999999, |
|
"grad_norm": 0.8932304382324219, |
|
"learning_rate": 6.592922480560483e-05, |
|
"loss": 0.6701, |
|
"step": 1145 |
|
}, |
|
{ |
|
"epoch": 1.8399999999999999, |
|
"grad_norm": 0.9220761656761169, |
|
"learning_rate": 6.51427952678185e-05, |
|
"loss": 0.6216, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 1.8479999999999999, |
|
"grad_norm": 0.688731849193573, |
|
"learning_rate": 6.435881212867493e-05, |
|
"loss": 0.5877, |
|
"step": 1155 |
|
}, |
|
{ |
|
"epoch": 1.8559999999999999, |
|
"grad_norm": 0.6370134949684143, |
|
"learning_rate": 6.357733041081018e-05, |
|
"loss": 0.6256, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 1.8639999999999999, |
|
"grad_norm": 0.7421153783798218, |
|
"learning_rate": 6.27984049613019e-05, |
|
"loss": 0.7062, |
|
"step": 1165 |
|
}, |
|
{ |
|
"epoch": 1.8719999999999999, |
|
"grad_norm": 0.8129108548164368, |
|
"learning_rate": 6.20220904478199e-05, |
|
"loss": 0.6304, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 1.88, |
|
"grad_norm": 0.7866687774658203, |
|
"learning_rate": 6.12484413547897e-05, |
|
"loss": 0.7695, |
|
"step": 1175 |
|
}, |
|
{ |
|
"epoch": 1.888, |
|
"grad_norm": 0.8307198882102966, |
|
"learning_rate": 6.047751197956838e-05, |
|
"loss": 0.5723, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 1.896, |
|
"grad_norm": 0.5541536211967468, |
|
"learning_rate": 5.9709356428633746e-05, |
|
"loss": 0.6847, |
|
"step": 1185 |
|
}, |
|
{ |
|
"epoch": 1.904, |
|
"grad_norm": 0.731555163860321, |
|
"learning_rate": 5.8944028613787206e-05, |
|
"loss": 0.6618, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 1.912, |
|
"grad_norm": 1.020991325378418, |
|
"learning_rate": 5.818158224836987e-05, |
|
"loss": 0.6275, |
|
"step": 1195 |
|
}, |
|
{ |
|
"epoch": 1.92, |
|
"grad_norm": 0.7172287702560425, |
|
"learning_rate": 5.7422070843492734e-05, |
|
"loss": 0.5617, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 1.928, |
|
"grad_norm": 0.7232096791267395, |
|
"learning_rate": 5.666554770428129e-05, |
|
"loss": 0.6278, |
|
"step": 1205 |
|
}, |
|
{ |
|
"epoch": 1.936, |
|
"grad_norm": 0.7234194278717041, |
|
"learning_rate": 5.591206592613416e-05, |
|
"loss": 0.6713, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 1.944, |
|
"grad_norm": 0.6906010508537292, |
|
"learning_rate": 5.5161678390996796e-05, |
|
"loss": 0.6113, |
|
"step": 1215 |
|
}, |
|
{ |
|
"epoch": 1.952, |
|
"grad_norm": 0.9057906270027161, |
|
"learning_rate": 5.441443776365003e-05, |
|
"loss": 0.5587, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"grad_norm": 0.7144932150840759, |
|
"learning_rate": 5.3670396488013854e-05, |
|
"loss": 0.5601, |
|
"step": 1225 |
|
}, |
|
{ |
|
"epoch": 1.968, |
|
"grad_norm": 1.0071252584457397, |
|
"learning_rate": 5.292960678346675e-05, |
|
"loss": 0.5941, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 1.976, |
|
"grad_norm": 0.7061260342597961, |
|
"learning_rate": 5.2192120641180786e-05, |
|
"loss": 0.6285, |
|
"step": 1235 |
|
}, |
|
{ |
|
"epoch": 1.984, |
|
"grad_norm": 0.6937069892883301, |
|
"learning_rate": 5.145798982047261e-05, |
|
"loss": 0.6516, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 1.992, |
|
"grad_norm": 0.5176392793655396, |
|
"learning_rate": 5.072726584517086e-05, |
|
"loss": 0.5904, |
|
"step": 1245 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"grad_norm": 0.8501409292221069, |
|
"learning_rate": 5.000000000000002e-05, |
|
"loss": 0.619, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 2.008, |
|
"grad_norm": 0.5451085567474365, |
|
"learning_rate": 4.927624332698109e-05, |
|
"loss": 0.6058, |
|
"step": 1255 |
|
}, |
|
{ |
|
"epoch": 2.016, |
|
"grad_norm": 0.8410437107086182, |
|
"learning_rate": 4.8556046621849346e-05, |
|
"loss": 0.6248, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 2.024, |
|
"grad_norm": 0.6089378595352173, |
|
"learning_rate": 4.783946043048923e-05, |
|
"loss": 0.5247, |
|
"step": 1265 |
|
}, |
|
{ |
|
"epoch": 2.032, |
|
"grad_norm": 0.47437986731529236, |
|
"learning_rate": 4.712653504538683e-05, |
|
"loss": 0.5439, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 2.04, |
|
"grad_norm": 0.6925654411315918, |
|
"learning_rate": 4.6417320502100316e-05, |
|
"loss": 0.4491, |
|
"step": 1275 |
|
}, |
|
{ |
|
"epoch": 2.048, |
|
"grad_norm": 0.8538162708282471, |
|
"learning_rate": 4.5711866575748276e-05, |
|
"loss": 0.52, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 2.056, |
|
"grad_norm": 0.8048768639564514, |
|
"learning_rate": 4.501022277751602e-05, |
|
"loss": 0.5259, |
|
"step": 1285 |
|
}, |
|
{ |
|
"epoch": 2.064, |
|
"grad_norm": 0.7191641330718994, |
|
"learning_rate": 4.431243835118124e-05, |
|
"loss": 0.5025, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 2.072, |
|
"grad_norm": 1.0918892621994019, |
|
"learning_rate": 4.361856226965733e-05, |
|
"loss": 0.4772, |
|
"step": 1295 |
|
}, |
|
{ |
|
"epoch": 2.08, |
|
"grad_norm": 0.8152824640274048, |
|
"learning_rate": 4.2928643231556844e-05, |
|
"loss": 0.5945, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 2.088, |
|
"grad_norm": 0.640073299407959, |
|
"learning_rate": 4.224272965777326e-05, |
|
"loss": 0.4813, |
|
"step": 1305 |
|
}, |
|
{ |
|
"epoch": 2.096, |
|
"grad_norm": 0.7172432541847229, |
|
"learning_rate": 4.15608696880828e-05, |
|
"loss": 0.5315, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 2.104, |
|
"grad_norm": 1.1283674240112305, |
|
"learning_rate": 4.08831111777658e-05, |
|
"loss": 0.5591, |
|
"step": 1315 |
|
}, |
|
{ |
|
"epoch": 2.112, |
|
"grad_norm": 0.8184736967086792, |
|
"learning_rate": 4.020950169424815e-05, |
|
"loss": 0.605, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 2.12, |
|
"grad_norm": 0.6823618412017822, |
|
"learning_rate": 3.954008851376252e-05, |
|
"loss": 0.4955, |
|
"step": 1325 |
|
}, |
|
{ |
|
"epoch": 2.128, |
|
"grad_norm": 0.8576385378837585, |
|
"learning_rate": 3.887491861803085e-05, |
|
"loss": 0.5757, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 2.136, |
|
"grad_norm": 0.967835009098053, |
|
"learning_rate": 3.821403869096658e-05, |
|
"loss": 0.5313, |
|
"step": 1335 |
|
}, |
|
{ |
|
"epoch": 2.144, |
|
"grad_norm": 0.7330173254013062, |
|
"learning_rate": 3.755749511539845e-05, |
|
"loss": 0.5904, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 2.152, |
|
"grad_norm": 0.6664792895317078, |
|
"learning_rate": 3.690533396981504e-05, |
|
"loss": 0.4679, |
|
"step": 1345 |
|
}, |
|
{ |
|
"epoch": 2.16, |
|
"grad_norm": 0.7639065980911255, |
|
"learning_rate": 3.6257601025131026e-05, |
|
"loss": 0.5235, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 2.168, |
|
"grad_norm": 0.6960520148277283, |
|
"learning_rate": 3.561434174147463e-05, |
|
"loss": 0.5797, |
|
"step": 1355 |
|
}, |
|
{ |
|
"epoch": 2.176, |
|
"grad_norm": 0.8231356143951416, |
|
"learning_rate": 3.497560126499709e-05, |
|
"loss": 0.5772, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 2.184, |
|
"grad_norm": 0.8968437910079956, |
|
"learning_rate": 3.4341424424704375e-05, |
|
"loss": 0.5316, |
|
"step": 1365 |
|
}, |
|
{ |
|
"epoch": 2.192, |
|
"grad_norm": 0.8037480711936951, |
|
"learning_rate": 3.371185572931048e-05, |
|
"loss": 0.5646, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 2.2, |
|
"grad_norm": 0.9330148696899414, |
|
"learning_rate": 3.308693936411421e-05, |
|
"loss": 0.5431, |
|
"step": 1375 |
|
}, |
|
{ |
|
"epoch": 2.208, |
|
"grad_norm": 0.6958775520324707, |
|
"learning_rate": 3.246671918789755e-05, |
|
"loss": 0.5403, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 2.216, |
|
"grad_norm": 0.9870476126670837, |
|
"learning_rate": 3.1851238729848034e-05, |
|
"loss": 0.5329, |
|
"step": 1385 |
|
}, |
|
{ |
|
"epoch": 2.224, |
|
"grad_norm": 0.5036590099334717, |
|
"learning_rate": 3.124054118650327e-05, |
|
"loss": 0.5696, |
|
"step": 1390 |
|
}, |
|
{ |
|
"epoch": 2.232, |
|
"grad_norm": 0.8640053868293762, |
|
"learning_rate": 3.063466941871952e-05, |
|
"loss": 0.59, |
|
"step": 1395 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"grad_norm": 0.6065173149108887, |
|
"learning_rate": 3.0033665948663448e-05, |
|
"loss": 0.5116, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 2.248, |
|
"grad_norm": 1.083775520324707, |
|
"learning_rate": 2.9437572956827964e-05, |
|
"loss": 0.5783, |
|
"step": 1405 |
|
}, |
|
{ |
|
"epoch": 2.2560000000000002, |
|
"grad_norm": 0.7090497016906738, |
|
"learning_rate": 2.8846432279071467e-05, |
|
"loss": 0.6259, |
|
"step": 1410 |
|
}, |
|
{ |
|
"epoch": 2.2640000000000002, |
|
"grad_norm": 0.742468535900116, |
|
"learning_rate": 2.826028540368215e-05, |
|
"loss": 0.5759, |
|
"step": 1415 |
|
}, |
|
{ |
|
"epoch": 2.2720000000000002, |
|
"grad_norm": 0.9219839572906494, |
|
"learning_rate": 2.7679173468465812e-05, |
|
"loss": 0.497, |
|
"step": 1420 |
|
}, |
|
{ |
|
"epoch": 2.2800000000000002, |
|
"grad_norm": 0.7159206867218018, |
|
"learning_rate": 2.7103137257858868e-05, |
|
"loss": 0.619, |
|
"step": 1425 |
|
}, |
|
{ |
|
"epoch": 2.288, |
|
"grad_norm": 0.6997727751731873, |
|
"learning_rate": 2.6532217200065858e-05, |
|
"loss": 0.5858, |
|
"step": 1430 |
|
}, |
|
{ |
|
"epoch": 2.296, |
|
"grad_norm": 0.7493643164634705, |
|
"learning_rate": 2.5966453364222186e-05, |
|
"loss": 0.6291, |
|
"step": 1435 |
|
}, |
|
{ |
|
"epoch": 2.304, |
|
"grad_norm": 0.8311699032783508, |
|
"learning_rate": 2.540588545758179e-05, |
|
"loss": 0.6418, |
|
"step": 1440 |
|
}, |
|
{ |
|
"epoch": 2.312, |
|
"grad_norm": 0.7084354758262634, |
|
"learning_rate": 2.48505528227304e-05, |
|
"loss": 0.5483, |
|
"step": 1445 |
|
}, |
|
{ |
|
"epoch": 2.32, |
|
"grad_norm": 0.734438955783844, |
|
"learning_rate": 2.4300494434824373e-05, |
|
"loss": 0.6071, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 2.328, |
|
"grad_norm": 0.8913635015487671, |
|
"learning_rate": 2.37557488988552e-05, |
|
"loss": 0.5099, |
|
"step": 1455 |
|
}, |
|
{ |
|
"epoch": 2.336, |
|
"grad_norm": 0.8349048495292664, |
|
"learning_rate": 2.321635444694028e-05, |
|
"loss": 0.5186, |
|
"step": 1460 |
|
}, |
|
{ |
|
"epoch": 2.344, |
|
"grad_norm": 0.6164011359214783, |
|
"learning_rate": 2.2682348935639274e-05, |
|
"loss": 0.5043, |
|
"step": 1465 |
|
}, |
|
{ |
|
"epoch": 2.352, |
|
"grad_norm": 1.044892430305481, |
|
"learning_rate": 2.2153769843297667e-05, |
|
"loss": 0.61, |
|
"step": 1470 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"grad_norm": 0.9142879247665405, |
|
"learning_rate": 2.163065426741603e-05, |
|
"loss": 0.5987, |
|
"step": 1475 |
|
}, |
|
{ |
|
"epoch": 2.368, |
|
"grad_norm": 0.6232836842536926, |
|
"learning_rate": 2.1113038922046602e-05, |
|
"loss": 0.5212, |
|
"step": 1480 |
|
}, |
|
{ |
|
"epoch": 2.376, |
|
"grad_norm": 0.49558231234550476, |
|
"learning_rate": 2.0600960135216462e-05, |
|
"loss": 0.4796, |
|
"step": 1485 |
|
}, |
|
{ |
|
"epoch": 2.384, |
|
"grad_norm": 0.7887687683105469, |
|
"learning_rate": 2.009445384637805e-05, |
|
"loss": 0.4844, |
|
"step": 1490 |
|
}, |
|
{ |
|
"epoch": 2.392, |
|
"grad_norm": 0.8086990714073181, |
|
"learning_rate": 1.9593555603886538e-05, |
|
"loss": 0.5085, |
|
"step": 1495 |
|
}, |
|
{ |
|
"epoch": 2.4, |
|
"grad_norm": 0.6713303327560425, |
|
"learning_rate": 1.9098300562505266e-05, |
|
"loss": 0.4839, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 2.408, |
|
"grad_norm": 0.6262741684913635, |
|
"learning_rate": 1.8608723480938206e-05, |
|
"loss": 0.5715, |
|
"step": 1505 |
|
}, |
|
{ |
|
"epoch": 2.416, |
|
"grad_norm": 0.8025808334350586, |
|
"learning_rate": 1.812485871939056e-05, |
|
"loss": 0.5266, |
|
"step": 1510 |
|
}, |
|
{ |
|
"epoch": 2.424, |
|
"grad_norm": 0.8753231167793274, |
|
"learning_rate": 1.7646740237157256e-05, |
|
"loss": 0.5422, |
|
"step": 1515 |
|
}, |
|
{ |
|
"epoch": 2.432, |
|
"grad_norm": 0.6459301710128784, |
|
"learning_rate": 1.7174401590239587e-05, |
|
"loss": 0.5553, |
|
"step": 1520 |
|
}, |
|
{ |
|
"epoch": 2.44, |
|
"grad_norm": 0.6917416453361511, |
|
"learning_rate": 1.6707875928990058e-05, |
|
"loss": 0.5765, |
|
"step": 1525 |
|
}, |
|
{ |
|
"epoch": 2.448, |
|
"grad_norm": 0.7890029549598694, |
|
"learning_rate": 1.6247195995785837e-05, |
|
"loss": 0.549, |
|
"step": 1530 |
|
}, |
|
{ |
|
"epoch": 2.456, |
|
"grad_norm": 0.9913660883903503, |
|
"learning_rate": 1.579239412273078e-05, |
|
"loss": 0.4876, |
|
"step": 1535 |
|
}, |
|
{ |
|
"epoch": 2.464, |
|
"grad_norm": 0.9030985832214355, |
|
"learning_rate": 1.5343502229386207e-05, |
|
"loss": 0.5546, |
|
"step": 1540 |
|
}, |
|
{ |
|
"epoch": 2.472, |
|
"grad_norm": 0.9133403301239014, |
|
"learning_rate": 1.4900551820530828e-05, |
|
"loss": 0.5356, |
|
"step": 1545 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"grad_norm": 0.7083793878555298, |
|
"learning_rate": 1.4463573983949341e-05, |
|
"loss": 0.5142, |
|
"step": 1550 |
|
}, |
|
{ |
|
"epoch": 2.488, |
|
"grad_norm": 1.095435619354248, |
|
"learning_rate": 1.40325993882509e-05, |
|
"loss": 0.6054, |
|
"step": 1555 |
|
}, |
|
{ |
|
"epoch": 2.496, |
|
"grad_norm": 0.8825190663337708, |
|
"learning_rate": 1.3607658280716473e-05, |
|
"loss": 0.5294, |
|
"step": 1560 |
|
}, |
|
{ |
|
"epoch": 2.504, |
|
"grad_norm": 0.9436343908309937, |
|
"learning_rate": 1.3188780485176088e-05, |
|
"loss": 0.5294, |
|
"step": 1565 |
|
}, |
|
{ |
|
"epoch": 2.512, |
|
"grad_norm": 1.0125439167022705, |
|
"learning_rate": 1.2775995399915631e-05, |
|
"loss": 0.4905, |
|
"step": 1570 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"grad_norm": 0.8476350903511047, |
|
"learning_rate": 1.2369331995613665e-05, |
|
"loss": 0.5186, |
|
"step": 1575 |
|
}, |
|
{ |
|
"epoch": 2.528, |
|
"grad_norm": 0.9092681407928467, |
|
"learning_rate": 1.196881881330798e-05, |
|
"loss": 0.4909, |
|
"step": 1580 |
|
}, |
|
{ |
|
"epoch": 2.536, |
|
"grad_norm": 0.7970360517501831, |
|
"learning_rate": 1.1574483962392767e-05, |
|
"loss": 0.5303, |
|
"step": 1585 |
|
}, |
|
{ |
|
"epoch": 2.544, |
|
"grad_norm": 0.8575041890144348, |
|
"learning_rate": 1.1186355118645554e-05, |
|
"loss": 0.5169, |
|
"step": 1590 |
|
}, |
|
{ |
|
"epoch": 2.552, |
|
"grad_norm": 0.7397408485412598, |
|
"learning_rate": 1.0804459522284926e-05, |
|
"loss": 0.5339, |
|
"step": 1595 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"grad_norm": 0.7415968179702759, |
|
"learning_rate": 1.042882397605871e-05, |
|
"loss": 0.5283, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 2.568, |
|
"grad_norm": 0.7035180926322937, |
|
"learning_rate": 1.0059474843362892e-05, |
|
"loss": 0.5576, |
|
"step": 1605 |
|
}, |
|
{ |
|
"epoch": 2.576, |
|
"grad_norm": 0.9805112481117249, |
|
"learning_rate": 9.696438046391288e-06, |
|
"loss": 0.5136, |
|
"step": 1610 |
|
}, |
|
{ |
|
"epoch": 2.584, |
|
"grad_norm": 0.6661838889122009, |
|
"learning_rate": 9.339739064316233e-06, |
|
"loss": 0.5885, |
|
"step": 1615 |
|
}, |
|
{ |
|
"epoch": 2.592, |
|
"grad_norm": 0.8581559062004089, |
|
"learning_rate": 8.989402931500434e-06, |
|
"loss": 0.5, |
|
"step": 1620 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"grad_norm": 0.7146279811859131, |
|
"learning_rate": 8.645454235739903e-06, |
|
"loss": 0.5325, |
|
"step": 1625 |
|
}, |
|
{ |
|
"epoch": 2.608, |
|
"grad_norm": 0.9474234580993652, |
|
"learning_rate": 8.307917116538378e-06, |
|
"loss": 0.5772, |
|
"step": 1630 |
|
}, |
|
{ |
|
"epoch": 2.616, |
|
"grad_norm": 0.9583209753036499, |
|
"learning_rate": 7.976815263412963e-06, |
|
"loss": 0.5736, |
|
"step": 1635 |
|
}, |
|
{ |
|
"epoch": 2.624, |
|
"grad_norm": 0.7156705260276794, |
|
"learning_rate": 7.652171914231776e-06, |
|
"loss": 0.5199, |
|
"step": 1640 |
|
}, |
|
{ |
|
"epoch": 2.632, |
|
"grad_norm": 0.8224849700927734, |
|
"learning_rate": 7.3340098535827905e-06, |
|
"loss": 0.5753, |
|
"step": 1645 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"grad_norm": 0.8689257502555847, |
|
"learning_rate": 7.022351411174866e-06, |
|
"loss": 0.5424, |
|
"step": 1650 |
|
}, |
|
{ |
|
"epoch": 2.648, |
|
"grad_norm": 0.6636053323745728, |
|
"learning_rate": 6.717218460270536e-06, |
|
"loss": 0.5555, |
|
"step": 1655 |
|
}, |
|
{ |
|
"epoch": 2.656, |
|
"grad_norm": 0.8688860535621643, |
|
"learning_rate": 6.418632416150927e-06, |
|
"loss": 0.4936, |
|
"step": 1660 |
|
}, |
|
{ |
|
"epoch": 2.664, |
|
"grad_norm": 0.6272854208946228, |
|
"learning_rate": 6.126614234612593e-06, |
|
"loss": 0.6291, |
|
"step": 1665 |
|
}, |
|
{ |
|
"epoch": 2.672, |
|
"grad_norm": 1.2240337133407593, |
|
"learning_rate": 5.8411844104969916e-06, |
|
"loss": 0.5197, |
|
"step": 1670 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"grad_norm": 0.9820936918258667, |
|
"learning_rate": 5.562362976251901e-06, |
|
"loss": 0.5398, |
|
"step": 1675 |
|
}, |
|
{ |
|
"epoch": 2.6879999999999997, |
|
"grad_norm": 1.1582359075546265, |
|
"learning_rate": 5.290169500525577e-06, |
|
"loss": 0.6059, |
|
"step": 1680 |
|
}, |
|
{ |
|
"epoch": 2.6959999999999997, |
|
"grad_norm": 0.5501114726066589, |
|
"learning_rate": 5.024623086793323e-06, |
|
"loss": 0.531, |
|
"step": 1685 |
|
}, |
|
{ |
|
"epoch": 2.7039999999999997, |
|
"grad_norm": 0.8848717212677002, |
|
"learning_rate": 4.765742372016735e-06, |
|
"loss": 0.6054, |
|
"step": 1690 |
|
}, |
|
{ |
|
"epoch": 2.7119999999999997, |
|
"grad_norm": 0.7358693480491638, |
|
"learning_rate": 4.513545525335705e-06, |
|
"loss": 0.5173, |
|
"step": 1695 |
|
}, |
|
{ |
|
"epoch": 2.7199999999999998, |
|
"grad_norm": 0.9218215942382812, |
|
"learning_rate": 4.268050246793276e-06, |
|
"loss": 0.4944, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 2.7279999999999998, |
|
"grad_norm": 0.6374716758728027, |
|
"learning_rate": 4.029273766093333e-06, |
|
"loss": 0.5183, |
|
"step": 1705 |
|
}, |
|
{ |
|
"epoch": 2.7359999999999998, |
|
"grad_norm": 0.583243191242218, |
|
"learning_rate": 3.797232841391407e-06, |
|
"loss": 0.668, |
|
"step": 1710 |
|
}, |
|
{ |
|
"epoch": 2.7439999999999998, |
|
"grad_norm": 0.8384690284729004, |
|
"learning_rate": 3.5719437581185454e-06, |
|
"loss": 0.5068, |
|
"step": 1715 |
|
}, |
|
{ |
|
"epoch": 2.752, |
|
"grad_norm": 0.8034130334854126, |
|
"learning_rate": 3.3534223278382405e-06, |
|
"loss": 0.5823, |
|
"step": 1720 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"grad_norm": 0.8146041631698608, |
|
"learning_rate": 3.1416838871368924e-06, |
|
"loss": 0.6111, |
|
"step": 1725 |
|
}, |
|
{ |
|
"epoch": 2.768, |
|
"grad_norm": 0.8122982382774353, |
|
"learning_rate": 2.936743296547273e-06, |
|
"loss": 0.5231, |
|
"step": 1730 |
|
}, |
|
{ |
|
"epoch": 2.776, |
|
"grad_norm": 0.7326982021331787, |
|
"learning_rate": 2.738614939505646e-06, |
|
"loss": 0.5236, |
|
"step": 1735 |
|
}, |
|
{ |
|
"epoch": 2.784, |
|
"grad_norm": 0.7472147345542908, |
|
"learning_rate": 2.5473127213422763e-06, |
|
"loss": 0.5657, |
|
"step": 1740 |
|
}, |
|
{ |
|
"epoch": 2.792, |
|
"grad_norm": 0.8197700381278992, |
|
"learning_rate": 2.3628500683055222e-06, |
|
"loss": 0.5518, |
|
"step": 1745 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"grad_norm": 0.8733732104301453, |
|
"learning_rate": 2.1852399266194314e-06, |
|
"loss": 0.4908, |
|
"step": 1750 |
|
}, |
|
{ |
|
"epoch": 2.808, |
|
"grad_norm": 0.8913092017173767, |
|
"learning_rate": 2.014494761575314e-06, |
|
"loss": 0.5459, |
|
"step": 1755 |
|
}, |
|
{ |
|
"epoch": 2.816, |
|
"grad_norm": 1.1259772777557373, |
|
"learning_rate": 1.8506265566567094e-06, |
|
"loss": 0.5208, |
|
"step": 1760 |
|
}, |
|
{ |
|
"epoch": 2.824, |
|
"grad_norm": 0.7692184448242188, |
|
"learning_rate": 1.6936468126984572e-06, |
|
"loss": 0.5824, |
|
"step": 1765 |
|
}, |
|
{ |
|
"epoch": 2.832, |
|
"grad_norm": 0.588602602481842, |
|
"learning_rate": 1.543566547079467e-06, |
|
"loss": 0.5512, |
|
"step": 1770 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"grad_norm": 0.6324055790901184, |
|
"learning_rate": 1.400396292949513e-06, |
|
"loss": 0.6327, |
|
"step": 1775 |
|
}, |
|
{ |
|
"epoch": 2.848, |
|
"grad_norm": 0.7608378529548645, |
|
"learning_rate": 1.26414609848996e-06, |
|
"loss": 0.5292, |
|
"step": 1780 |
|
}, |
|
{ |
|
"epoch": 2.856, |
|
"grad_norm": 0.7972851395606995, |
|
"learning_rate": 1.134825526208605e-06, |
|
"loss": 0.5692, |
|
"step": 1785 |
|
}, |
|
{ |
|
"epoch": 2.864, |
|
"grad_norm": 0.9705446362495422, |
|
"learning_rate": 1.0124436522684243e-06, |
|
"loss": 0.5532, |
|
"step": 1790 |
|
}, |
|
{ |
|
"epoch": 2.872, |
|
"grad_norm": 0.6317399144172668, |
|
"learning_rate": 8.970090658507291e-07, |
|
"loss": 0.5314, |
|
"step": 1795 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"grad_norm": 0.6457757949829102, |
|
"learning_rate": 7.885298685522235e-07, |
|
"loss": 0.524, |
|
"step": 1800 |
|
}, |
|
{ |
|
"epoch": 2.888, |
|
"grad_norm": 0.8593656420707703, |
|
"learning_rate": 6.870136738164612e-07, |
|
"loss": 0.5227, |
|
"step": 1805 |
|
}, |
|
{ |
|
"epoch": 2.896, |
|
"grad_norm": 1.0187020301818848, |
|
"learning_rate": 5.924676063995382e-07, |
|
"loss": 0.5993, |
|
"step": 1810 |
|
}, |
|
{ |
|
"epoch": 2.904, |
|
"grad_norm": 0.7082214951515198, |
|
"learning_rate": 5.048983018699827e-07, |
|
"loss": 0.5618, |
|
"step": 1815 |
|
}, |
|
{ |
|
"epoch": 2.912, |
|
"grad_norm": 0.6521438956260681, |
|
"learning_rate": 4.2431190614309335e-07, |
|
"loss": 0.5504, |
|
"step": 1820 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"grad_norm": 0.8906036615371704, |
|
"learning_rate": 3.50714075049563e-07, |
|
"loss": 0.5147, |
|
"step": 1825 |
|
}, |
|
{ |
|
"epoch": 2.928, |
|
"grad_norm": 1.0908008813858032, |
|
"learning_rate": 2.841099739386066e-07, |
|
"loss": 0.5564, |
|
"step": 1830 |
|
}, |
|
{ |
|
"epoch": 2.936, |
|
"grad_norm": 0.6374122500419617, |
|
"learning_rate": 2.2450427731534053e-07, |
|
"loss": 0.5188, |
|
"step": 1835 |
|
}, |
|
{ |
|
"epoch": 2.944, |
|
"grad_norm": 0.9616740345954895, |
|
"learning_rate": 1.7190116851280026e-07, |
|
"loss": 0.5438, |
|
"step": 1840 |
|
}, |
|
{ |
|
"epoch": 2.952, |
|
"grad_norm": 1.0712924003601074, |
|
"learning_rate": 1.2630433939825327e-07, |
|
"loss": 0.4962, |
|
"step": 1845 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"grad_norm": 0.8226613998413086, |
|
"learning_rate": 8.771699011416168e-08, |
|
"loss": 0.5021, |
|
"step": 1850 |
|
}, |
|
{ |
|
"epoch": 2.968, |
|
"grad_norm": 0.9519492983818054, |
|
"learning_rate": 5.6141828853573106e-08, |
|
"loss": 0.5277, |
|
"step": 1855 |
|
}, |
|
{ |
|
"epoch": 2.976, |
|
"grad_norm": 0.9817518591880798, |
|
"learning_rate": 3.1581071670006015e-08, |
|
"loss": 0.5764, |
|
"step": 1860 |
|
}, |
|
{ |
|
"epoch": 2.984, |
|
"grad_norm": 0.7039242386817932, |
|
"learning_rate": 1.4036442321962995e-08, |
|
"loss": 0.5408, |
|
"step": 1865 |
|
}, |
|
{ |
|
"epoch": 2.992, |
|
"grad_norm": 0.591012179851532, |
|
"learning_rate": 3.509172151938689e-09, |
|
"loss": 0.5014, |
|
"step": 1870 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"grad_norm": 0.7672661542892456, |
|
"learning_rate": 0.0, |
|
"loss": 0.6182, |
|
"step": 1875 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"step": 1875, |
|
"total_flos": 1.1764404625814323e+17, |
|
"train_loss": 0.6465437274932861, |
|
"train_runtime": 2365.2475, |
|
"train_samples_per_second": 12.684, |
|
"train_steps_per_second": 0.793 |
|
} |
|
], |
|
"logging_steps": 5, |
|
"max_steps": 1875, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 3, |
|
"save_steps": 100, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 1.1764404625814323e+17, |
|
"train_batch_size": 16, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |