{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.7910447761194028,
  "eval_steps": 35,
  "global_step": 315,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.028429282160625444,
      "grad_norm": 5.805906295776367,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 10.5536,
      "step": 5
    },
    {
      "epoch": 0.05685856432125089,
      "grad_norm": 7.361557960510254,
      "learning_rate": 2.857142857142857e-05,
      "loss": 10.4859,
      "step": 10
    },
    {
      "epoch": 0.08528784648187633,
      "grad_norm": 9.014820098876953,
      "learning_rate": 4.2857142857142856e-05,
      "loss": 9.3188,
      "step": 15
    },
    {
      "epoch": 0.11371712864250177,
      "grad_norm": 10.094988822937012,
      "learning_rate": 5.714285714285714e-05,
      "loss": 6.6553,
      "step": 20
    },
    {
      "epoch": 0.14214641080312723,
      "grad_norm": 15.287639617919922,
      "learning_rate": 7.142857142857143e-05,
      "loss": 3.4581,
      "step": 25
    },
    {
      "epoch": 0.17057569296375266,
      "grad_norm": 1.3499271869659424,
      "learning_rate": 8.571428571428571e-05,
      "loss": 0.5521,
      "step": 30
    },
    {
      "epoch": 0.19900497512437812,
      "grad_norm": 1.5600136518478394,
      "learning_rate": 0.0001,
      "loss": 0.3823,
      "step": 35
    },
    {
      "epoch": 0.19900497512437812,
      "eval_loss": 0.35117578506469727,
      "eval_runtime": 230.9383,
      "eval_samples_per_second": 10.825,
      "eval_steps_per_second": 10.825,
      "step": 35
    },
    {
      "epoch": 0.22743425728500355,
      "grad_norm": 3.365349531173706,
      "learning_rate": 9.993784606094612e-05,
      "loss": 0.363,
      "step": 40
    },
    {
      "epoch": 0.255863539445629,
      "grad_norm": 1.3274873495101929,
      "learning_rate": 9.975153876827008e-05,
      "loss": 0.3428,
      "step": 45
    },
    {
      "epoch": 0.28429282160625446,
      "grad_norm": 1.2516767978668213,
      "learning_rate": 9.944154131125642e-05,
      "loss": 0.349,
      "step": 50
    },
    {
      "epoch": 0.31272210376687987,
      "grad_norm": 1.22030770778656,
      "learning_rate": 9.900862439242719e-05,
      "loss": 0.3336,
      "step": 55
    },
    {
      "epoch": 0.3411513859275053,
      "grad_norm": 1.0675277709960938,
      "learning_rate": 9.84538643114539e-05,
      "loss": 0.3486,
      "step": 60
    },
    {
      "epoch": 0.3695806680881308,
      "grad_norm": 1.42255699634552,
      "learning_rate": 9.777864028930705e-05,
      "loss": 0.2993,
      "step": 65
    },
    {
      "epoch": 0.39800995024875624,
      "grad_norm": 2.10113787651062,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.3257,
      "step": 70
    },
    {
      "epoch": 0.39800995024875624,
      "eval_loss": 0.2916417121887207,
      "eval_runtime": 231.0131,
      "eval_samples_per_second": 10.822,
      "eval_steps_per_second": 10.822,
      "step": 70
    },
    {
      "epoch": 0.42643923240938164,
      "grad_norm": 1.51693594455719,
      "learning_rate": 9.607381059352038e-05,
      "loss": 0.3209,
      "step": 75
    },
    {
      "epoch": 0.4548685145700071,
      "grad_norm": 1.0357732772827148,
      "learning_rate": 9.504844339512095e-05,
      "loss": 0.2859,
      "step": 80
    },
    {
      "epoch": 0.48329779673063256,
      "grad_norm": 0.8819968104362488,
      "learning_rate": 9.391107866851143e-05,
      "loss": 0.3123,
      "step": 85
    },
    {
      "epoch": 0.511727078891258,
      "grad_norm": 0.9069387316703796,
      "learning_rate": 9.266454408160779e-05,
      "loss": 0.273,
      "step": 90
    },
    {
      "epoch": 0.5401563610518835,
      "grad_norm": 0.9045886993408203,
      "learning_rate": 9.131193871579975e-05,
      "loss": 0.2927,
      "step": 95
    },
    {
      "epoch": 0.5685856432125089,
      "grad_norm": 1.3000465631484985,
      "learning_rate": 8.985662536114613e-05,
      "loss": 0.2767,
      "step": 100
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 0.7296444177627563,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.2834,
      "step": 105
    },
    {
      "epoch": 0.5970149253731343,
      "eval_loss": 0.28583231568336487,
      "eval_runtime": 230.4985,
      "eval_samples_per_second": 10.846,
      "eval_steps_per_second": 10.846,
      "step": 105
    },
    {
      "epoch": 0.6254442075337597,
      "grad_norm": 0.6089270710945129,
      "learning_rate": 8.665259359149132e-05,
      "loss": 0.2764,
      "step": 110
    },
    {
      "epoch": 0.6538734896943852,
      "grad_norm": 0.8748773336410522,
      "learning_rate": 8.491184090430364e-05,
      "loss": 0.2816,
      "step": 115
    },
    {
      "epoch": 0.6823027718550106,
      "grad_norm": 0.7506688833236694,
      "learning_rate": 8.308429187984297e-05,
      "loss": 0.2766,
      "step": 120
    },
    {
      "epoch": 0.7107320540156361,
      "grad_norm": 0.6250064373016357,
      "learning_rate": 8.117449009293668e-05,
      "loss": 0.2746,
      "step": 125
    },
    {
      "epoch": 0.7391613361762616,
      "grad_norm": 0.687236487865448,
      "learning_rate": 7.91871836117395e-05,
      "loss": 0.2691,
      "step": 130
    },
    {
      "epoch": 0.767590618336887,
      "grad_norm": 1.2124730348587036,
      "learning_rate": 7.712731319328798e-05,
      "loss": 0.2977,
      "step": 135
    },
    {
      "epoch": 0.7960199004975125,
      "grad_norm": 0.7678080797195435,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.2809,
      "step": 140
    },
    {
      "epoch": 0.7960199004975125,
      "eval_loss": 0.2728855609893799,
      "eval_runtime": 230.8787,
      "eval_samples_per_second": 10.828,
      "eval_steps_per_second": 10.828,
      "step": 140
    },
    {
      "epoch": 0.8244491826581379,
      "grad_norm": 1.257142424583435,
      "learning_rate": 7.281053286765815e-05,
      "loss": 0.2658,
      "step": 145
    },
    {
      "epoch": 0.8528784648187633,
      "grad_norm": 0.8419828414916992,
      "learning_rate": 7.056435515653059e-05,
      "loss": 0.289,
      "step": 150
    },
    {
      "epoch": 0.8813077469793887,
      "grad_norm": 0.6198025345802307,
      "learning_rate": 6.826705121831976e-05,
      "loss": 0.2941,
      "step": 155
    },
    {
      "epoch": 0.9097370291400142,
      "grad_norm": 0.5303702354431152,
      "learning_rate": 6.592433251258423e-05,
      "loss": 0.2752,
      "step": 160
    },
    {
      "epoch": 0.9381663113006397,
      "grad_norm": 1.7567023038864136,
      "learning_rate": 6.354202340715026e-05,
      "loss": 0.2569,
      "step": 165
    },
    {
      "epoch": 0.9665955934612651,
      "grad_norm": 0.6840919852256775,
      "learning_rate": 6.112604669781572e-05,
      "loss": 0.2587,
      "step": 170
    },
    {
      "epoch": 0.9950248756218906,
      "grad_norm": 0.5812920928001404,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.2826,
      "step": 175
    },
    {
      "epoch": 0.9950248756218906,
      "eval_loss": 0.2588258981704712,
      "eval_runtime": 231.051,
      "eval_samples_per_second": 10.82,
      "eval_steps_per_second": 10.82,
      "step": 175
    },
    {
      "epoch": 1.023454157782516,
      "grad_norm": 0.5891302824020386,
      "learning_rate": 5.621718523237427e-05,
      "loss": 0.2676,
      "step": 180
    },
    {
      "epoch": 1.0518834399431414,
      "grad_norm": 0.7072641253471375,
      "learning_rate": 5.373650467932122e-05,
      "loss": 0.2378,
      "step": 185
    },
    {
      "epoch": 1.080312722103767,
      "grad_norm": 1.350577712059021,
      "learning_rate": 5.124653458690365e-05,
      "loss": 0.2682,
      "step": 190
    },
    {
      "epoch": 1.1087420042643923,
      "grad_norm": 0.8520832657814026,
      "learning_rate": 4.875346541309637e-05,
      "loss": 0.2454,
      "step": 195
    },
    {
      "epoch": 1.1371712864250179,
      "grad_norm": 0.7263408303260803,
      "learning_rate": 4.626349532067879e-05,
      "loss": 0.2622,
      "step": 200
    },
    {
      "epoch": 1.1656005685856432,
      "grad_norm": 1.0578335523605347,
      "learning_rate": 4.378281476762576e-05,
      "loss": 0.258,
      "step": 205
    },
    {
      "epoch": 1.1940298507462686,
      "grad_norm": 0.5315471291542053,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.2476,
      "step": 210
    },
    {
      "epoch": 1.1940298507462686,
      "eval_loss": 0.2553260922431946,
      "eval_runtime": 230.8625,
      "eval_samples_per_second": 10.829,
      "eval_steps_per_second": 10.829,
      "step": 210
    },
    {
      "epoch": 1.2224591329068941,
      "grad_norm": 0.5915615558624268,
      "learning_rate": 3.887395330218429e-05,
      "loss": 0.2368,
      "step": 215
    },
    {
      "epoch": 1.2508884150675195,
      "grad_norm": 0.7240003943443298,
      "learning_rate": 3.6457976592849754e-05,
      "loss": 0.2334,
      "step": 220
    },
    {
      "epoch": 1.279317697228145,
      "grad_norm": 1.510029673576355,
      "learning_rate": 3.4075667487415785e-05,
      "loss": 0.2636,
      "step": 225
    },
    {
      "epoch": 1.3077469793887704,
      "grad_norm": 0.8403149247169495,
      "learning_rate": 3.173294878168025e-05,
      "loss": 0.2528,
      "step": 230
    },
    {
      "epoch": 1.336176261549396,
      "grad_norm": 0.5781741142272949,
      "learning_rate": 2.9435644843469436e-05,
      "loss": 0.2489,
      "step": 235
    },
    {
      "epoch": 1.3646055437100213,
      "grad_norm": 0.6707499027252197,
      "learning_rate": 2.718946713234185e-05,
      "loss": 0.2485,
      "step": 240
    },
    {
      "epoch": 1.3930348258706466,
      "grad_norm": 1.0248913764953613,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.2694,
      "step": 245
    },
    {
      "epoch": 1.3930348258706466,
      "eval_loss": 0.2521698772907257,
      "eval_runtime": 230.6792,
      "eval_samples_per_second": 10.838,
      "eval_steps_per_second": 10.838,
      "step": 245
    },
    {
      "epoch": 1.4214641080312722,
      "grad_norm": 0.7928401231765747,
      "learning_rate": 2.2872686806712035e-05,
      "loss": 0.2506,
      "step": 250
    },
    {
      "epoch": 1.4498933901918978,
      "grad_norm": 0.7061648368835449,
      "learning_rate": 2.0812816388260518e-05,
      "loss": 0.2397,
      "step": 255
    },
    {
      "epoch": 1.4783226723525231,
      "grad_norm": 0.673512876033783,
      "learning_rate": 1.8825509907063327e-05,
      "loss": 0.2138,
      "step": 260
    },
    {
      "epoch": 1.5067519545131485,
      "grad_norm": 0.6658352613449097,
      "learning_rate": 1.691570812015704e-05,
      "loss": 0.2417,
      "step": 265
    },
    {
      "epoch": 1.535181236673774,
      "grad_norm": 0.7412824034690857,
      "learning_rate": 1.5088159095696363e-05,
      "loss": 0.2568,
      "step": 270
    },
    {
      "epoch": 1.5636105188343994,
      "grad_norm": 1.0643491744995117,
      "learning_rate": 1.3347406408508695e-05,
      "loss": 0.2246,
      "step": 275
    },
    {
      "epoch": 1.5920398009950247,
      "grad_norm": 0.8839688301086426,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.249,
      "step": 280
    },
    {
      "epoch": 1.5920398009950247,
      "eval_loss": 0.253582626581192,
      "eval_runtime": 230.7977,
      "eval_samples_per_second": 10.832,
      "eval_steps_per_second": 10.832,
      "step": 280
    },
    {
      "epoch": 1.6204690831556503,
      "grad_norm": 1.12769615650177,
      "learning_rate": 1.0143374638853891e-05,
      "loss": 0.2577,
      "step": 285
    },
    {
      "epoch": 1.6488983653162759,
      "grad_norm": 0.8129110336303711,
      "learning_rate": 8.688061284200266e-06,
      "loss": 0.238,
      "step": 290
    },
    {
      "epoch": 1.6773276474769012,
      "grad_norm": 0.6995929479598999,
      "learning_rate": 7.33545591839222e-06,
      "loss": 0.2255,
      "step": 295
    },
    {
      "epoch": 1.7057569296375266,
      "grad_norm": 0.9470205903053284,
      "learning_rate": 6.088921331488568e-06,
      "loss": 0.251,
      "step": 300
    },
    {
      "epoch": 1.7341862117981521,
      "grad_norm": 0.8157157897949219,
      "learning_rate": 4.951556604879048e-06,
      "loss": 0.2634,
      "step": 305
    },
    {
      "epoch": 1.7626154939587777,
      "grad_norm": 0.7747371196746826,
      "learning_rate": 3.9261894064796135e-06,
      "loss": 0.2154,
      "step": 310
    },
    {
      "epoch": 1.7910447761194028,
      "grad_norm": 0.8623983263969421,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.2255,
      "step": 315
    },
    {
      "epoch": 1.7910447761194028,
      "eval_loss": 0.25022172927856445,
      "eval_runtime": 231.0404,
      "eval_samples_per_second": 10.821,
      "eval_steps_per_second": 10.821,
      "step": 315
    }
  ],
  "logging_steps": 5,
  "max_steps": 350,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 35,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8.612097401471631e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}