{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 16464,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00018221574344023323,
      "eval_accuracy": 0.013372916488627156,
      "eval_loss": 10.764164924621582,
      "eval_runtime": 265.3902,
      "eval_samples_per_second": 71.762,
      "eval_steps_per_second": 2.246,
      "step": 1
    },
    {
      "epoch": 0.00036443148688046647,
      "eval_accuracy": 0.038661994909930615,
      "eval_loss": 10.623634338378906,
      "eval_runtime": 265.6524,
      "eval_samples_per_second": 71.691,
      "eval_steps_per_second": 2.244,
      "step": 2
    },
    {
      "epoch": 0.0007288629737609329,
      "eval_accuracy": 0.0419639958558818,
      "eval_loss": 10.446562767028809,
      "eval_runtime": 265.6133,
      "eval_samples_per_second": 71.702,
      "eval_steps_per_second": 2.244,
      "step": 4
    },
    {
      "epoch": 0.0014577259475218659,
      "eval_accuracy": 0.04410673183105199,
      "eval_loss": 10.253758430480957,
      "eval_runtime": 266.28,
      "eval_samples_per_second": 71.522,
      "eval_steps_per_second": 2.238,
      "step": 8
    },
    {
      "epoch": 0.0029154518950437317,
      "eval_accuracy": 0.04422052313718063,
      "eval_loss": 10.048787117004395,
      "eval_runtime": 265.5895,
      "eval_samples_per_second": 71.708,
      "eval_steps_per_second": 2.244,
      "step": 16
    },
    {
      "epoch": 0.0058309037900874635,
      "eval_accuracy": 0.05247016186133218,
      "eval_loss": 9.678139686584473,
      "eval_runtime": 266.1849,
      "eval_samples_per_second": 71.548,
      "eval_steps_per_second": 2.239,
      "step": 32
    },
    {
      "epoch": 0.011661807580174927,
      "eval_accuracy": 0.06491211456531285,
      "eval_loss": 9.024864196777344,
      "eval_runtime": 266.9794,
      "eval_samples_per_second": 71.335,
      "eval_steps_per_second": 2.232,
      "step": 64
    },
    {
      "epoch": 0.023323615160349854,
      "eval_accuracy": 0.08493399513987425,
      "eval_loss": 8.21505069732666,
      "eval_runtime": 266.8342,
      "eval_samples_per_second": 71.374,
      "eval_steps_per_second": 2.234,
      "step": 128
    },
    {
      "epoch": 0.04664723032069971,
      "eval_accuracy": 0.10968881388346323,
      "eval_loss": 7.2851386070251465,
      "eval_runtime": 267.6197,
      "eval_samples_per_second": 71.164,
      "eval_steps_per_second": 2.227,
      "step": 256
    },
    {
      "epoch": 0.09110787172011661,
      "grad_norm": 0.5542777180671692,
      "learning_rate": 4.848153547133139e-05,
      "loss": 7.6369,
      "step": 500
    },
    {
      "epoch": 0.09329446064139942,
      "eval_accuracy": 0.12987750625095115,
      "eval_loss": 6.427273273468018,
      "eval_runtime": 267.5439,
      "eval_samples_per_second": 71.185,
      "eval_steps_per_second": 2.228,
      "step": 512
    },
    {
      "epoch": 0.18221574344023322,
      "grad_norm": 0.6718143820762634,
      "learning_rate": 4.696307094266278e-05,
      "loss": 6.0229,
      "step": 1000
    },
    {
      "epoch": 0.18658892128279883,
      "eval_accuracy": 0.16116734379422917,
      "eval_loss": 5.696187496185303,
      "eval_runtime": 268.1415,
      "eval_samples_per_second": 71.026,
      "eval_steps_per_second": 2.223,
      "step": 1024
    },
    {
      "epoch": 0.27332361516034986,
      "grad_norm": 0.6730483174324036,
      "learning_rate": 4.5444606413994175e-05,
      "loss": 5.5062,
      "step": 1500
    },
    {
      "epoch": 0.36443148688046645,
      "grad_norm": 0.7009645700454712,
      "learning_rate": 4.3926141885325564e-05,
      "loss": 5.2064,
      "step": 2000
    },
    {
      "epoch": 0.37317784256559766,
      "eval_accuracy": 0.1897994845258965,
      "eval_loss": 5.082965850830078,
      "eval_runtime": 268.3609,
      "eval_samples_per_second": 70.968,
      "eval_steps_per_second": 2.221,
      "step": 2048
    },
    {
      "epoch": 0.4555393586005831,
      "grad_norm": 0.7892386317253113,
      "learning_rate": 4.240767735665695e-05,
      "loss": 4.9923,
      "step": 2500
    },
    {
      "epoch": 0.5466472303206997,
      "grad_norm": 0.8406383991241455,
      "learning_rate": 4.088921282798834e-05,
      "loss": 4.8437,
      "step": 3000
    },
    {
      "epoch": 0.6377551020408163,
      "grad_norm": 0.8709362149238586,
      "learning_rate": 3.937074829931973e-05,
      "loss": 4.7268,
      "step": 3500
    },
    {
      "epoch": 0.7288629737609329,
      "grad_norm": 0.9013070464134216,
      "learning_rate": 3.785228377065112e-05,
      "loss": 4.6324,
      "step": 4000
    },
    {
      "epoch": 0.7463556851311953,
      "eval_accuracy": 0.22371889184616256,
      "eval_loss": 4.59232759475708,
      "eval_runtime": 268.1172,
      "eval_samples_per_second": 71.032,
      "eval_steps_per_second": 2.223,
      "step": 4096
    },
    {
      "epoch": 0.8199708454810496,
      "grad_norm": 0.9396291971206665,
      "learning_rate": 3.6333819241982507e-05,
      "loss": 4.556,
      "step": 4500
    },
    {
      "epoch": 0.9110787172011662,
      "grad_norm": 1.0571624040603638,
      "learning_rate": 3.4815354713313895e-05,
      "loss": 4.4846,
      "step": 5000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.2389537359040827,
      "eval_loss": 4.42299747467041,
      "eval_runtime": 268.0887,
      "eval_samples_per_second": 71.04,
      "eval_steps_per_second": 2.223,
      "step": 5488
    },
    {
      "epoch": 1.0021865889212829,
      "grad_norm": 0.9928905963897705,
      "learning_rate": 3.329689018464529e-05,
      "loss": 4.4267,
      "step": 5500
    },
    {
      "epoch": 1.0932944606413995,
      "grad_norm": 1.0094623565673828,
      "learning_rate": 3.177842565597668e-05,
      "loss": 4.3598,
      "step": 6000
    },
    {
      "epoch": 1.184402332361516,
      "grad_norm": 1.1072958707809448,
      "learning_rate": 3.0259961127308068e-05,
      "loss": 4.3227,
      "step": 6500
    },
    {
      "epoch": 1.2755102040816326,
      "grad_norm": 1.047276258468628,
      "learning_rate": 2.8741496598639456e-05,
      "loss": 4.2846,
      "step": 7000
    },
    {
      "epoch": 1.3666180758017492,
      "grad_norm": 1.0852686166763306,
      "learning_rate": 2.7223032069970845e-05,
      "loss": 4.2512,
      "step": 7500
    },
    {
      "epoch": 1.4577259475218658,
      "grad_norm": 1.080506443977356,
      "learning_rate": 2.5704567541302237e-05,
      "loss": 4.2266,
      "step": 8000
    },
    {
      "epoch": 1.4927113702623906,
      "eval_accuracy": 0.25919180456227686,
      "eval_loss": 4.229198455810547,
      "eval_runtime": 268.9266,
      "eval_samples_per_second": 70.819,
      "eval_steps_per_second": 2.216,
      "step": 8192
    },
    {
      "epoch": 1.5488338192419824,
      "grad_norm": 1.1083309650421143,
      "learning_rate": 2.4186103012633625e-05,
      "loss": 4.1916,
      "step": 8500
    },
    {
      "epoch": 1.639941690962099,
      "grad_norm": 1.1683882474899292,
      "learning_rate": 2.2667638483965014e-05,
      "loss": 4.1699,
      "step": 9000
    },
    {
      "epoch": 1.7310495626822158,
      "grad_norm": 1.2223962545394897,
      "learning_rate": 2.1149173955296406e-05,
      "loss": 4.1517,
      "step": 9500
    },
    {
      "epoch": 1.8221574344023324,
      "grad_norm": 1.1384245157241821,
      "learning_rate": 1.9630709426627795e-05,
      "loss": 4.1228,
      "step": 10000
    },
    {
      "epoch": 1.913265306122449,
      "grad_norm": 1.2079499959945679,
      "learning_rate": 1.8112244897959187e-05,
      "loss": 4.1079,
      "step": 10500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.2717258887026585,
      "eval_loss": 4.115562438964844,
      "eval_runtime": 267.5246,
      "eval_samples_per_second": 71.19,
      "eval_steps_per_second": 2.228,
      "step": 10976
    },
    {
      "epoch": 2.0043731778425657,
      "grad_norm": 1.215710163116455,
      "learning_rate": 1.6593780369290575e-05,
      "loss": 4.0902,
      "step": 11000
    },
    {
      "epoch": 2.0954810495626823,
      "grad_norm": 1.2627053260803223,
      "learning_rate": 1.5075315840621965e-05,
      "loss": 4.0556,
      "step": 11500
    },
    {
      "epoch": 2.186588921282799,
      "grad_norm": 1.2118171453475952,
      "learning_rate": 1.3556851311953352e-05,
      "loss": 4.0461,
      "step": 12000
    },
    {
      "epoch": 2.2776967930029155,
      "grad_norm": 1.3033018112182617,
      "learning_rate": 1.2038386783284743e-05,
      "loss": 4.0326,
      "step": 12500
    },
    {
      "epoch": 2.368804664723032,
      "grad_norm": 1.2471245527267456,
      "learning_rate": 1.0519922254616133e-05,
      "loss": 4.0282,
      "step": 13000
    },
    {
      "epoch": 2.4599125364431487,
      "grad_norm": 1.304821252822876,
      "learning_rate": 9.001457725947522e-06,
      "loss": 4.0197,
      "step": 13500
    },
    {
      "epoch": 2.5510204081632653,
      "grad_norm": 1.1969258785247803,
      "learning_rate": 7.482993197278912e-06,
      "loss": 4.0076,
      "step": 14000
    },
    {
      "epoch": 2.642128279883382,
      "grad_norm": 1.264854073524475,
      "learning_rate": 5.964528668610301e-06,
      "loss": 3.9948,
      "step": 14500
    },
    {
      "epoch": 2.7332361516034984,
      "grad_norm": 1.2441176176071167,
      "learning_rate": 4.4460641399416915e-06,
      "loss": 3.9953,
      "step": 15000
    },
    {
      "epoch": 2.824344023323615,
      "grad_norm": 1.2413525581359863,
      "learning_rate": 2.9275996112730806e-06,
      "loss": 3.9837,
      "step": 15500
    },
    {
      "epoch": 2.9154518950437316,
      "grad_norm": 1.2331761121749878,
      "learning_rate": 1.4091350826044704e-06,
      "loss": 3.9841,
      "step": 16000
    },
    {
      "epoch": 2.9854227405247813,
      "eval_accuracy": 0.28256937381675906,
      "eval_loss": 4.027393817901611,
      "eval_runtime": 267.6695,
      "eval_samples_per_second": 71.151,
      "eval_steps_per_second": 2.227,
      "step": 16384
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.2825705030042804,
      "eval_loss": 4.0273261070251465,
      "eval_runtime": 268.1735,
      "eval_samples_per_second": 71.017,
      "eval_steps_per_second": 2.222,
      "step": 16464
    },
    {
      "epoch": 3.0,
      "step": 16464,
      "total_flos": 1.44589665695957e+17,
      "train_loss": 4.469071524483817,
      "train_runtime": 22077.8476,
      "train_samples_per_second": 23.862,
      "train_steps_per_second": 0.746
    }
  ],
  "logging_steps": 500,
  "max_steps": 16464,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.44589665695957e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}