{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.977640545144804,
  "global_step": 93500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 2.98418867120954e-05,
      "loss": 4.6657,
      "step": 500
    },
    {
      "epoch": 0.05,
      "learning_rate": 2.9682176320272572e-05,
      "loss": 4.167,
      "step": 1000
    },
    {
      "epoch": 0.08,
      "learning_rate": 2.9522465928449746e-05,
      "loss": 4.0464,
      "step": 1500
    },
    {
      "epoch": 0.11,
      "learning_rate": 2.9362755536626915e-05,
      "loss": 3.9743,
      "step": 2000
    },
    {
      "epoch": 0.13,
      "learning_rate": 2.9203045144804092e-05,
      "loss": 3.9457,
      "step": 2500
    },
    {
      "epoch": 0.16,
      "learning_rate": 2.904333475298126e-05,
      "loss": 3.961,
      "step": 3000
    },
    {
      "epoch": 0.19,
      "learning_rate": 2.8883624361158435e-05,
      "loss": 3.9259,
      "step": 3500
    },
    {
      "epoch": 0.21,
      "learning_rate": 2.8723913969335604e-05,
      "loss": 3.9035,
      "step": 4000
    },
    {
      "epoch": 0.24,
      "learning_rate": 2.8564522998296424e-05,
      "loss": 3.8129,
      "step": 4500
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.8404812606473597e-05,
      "loss": 3.9113,
      "step": 5000
    },
    {
      "epoch": 0.29,
      "learning_rate": 2.8245102214650767e-05,
      "loss": 3.8406,
      "step": 5500
    },
    {
      "epoch": 0.32,
      "learning_rate": 2.808539182282794e-05,
      "loss": 3.8943,
      "step": 6000
    },
    {
      "epoch": 0.35,
      "learning_rate": 2.7926000851788756e-05,
      "loss": 3.8532,
      "step": 6500
    },
    {
      "epoch": 0.37,
      "learning_rate": 2.776629045996593e-05,
      "loss": 3.8599,
      "step": 7000
    },
    {
      "epoch": 0.4,
      "learning_rate": 2.76065800681431e-05,
      "loss": 3.8322,
      "step": 7500
    },
    {
      "epoch": 0.43,
      "learning_rate": 2.744718909710392e-05,
      "loss": 3.8144,
      "step": 8000
    },
    {
      "epoch": 0.45,
      "learning_rate": 2.728747870528109e-05,
      "loss": 3.786,
      "step": 8500
    },
    {
      "epoch": 0.48,
      "learning_rate": 2.712776831345826e-05,
      "loss": 3.7725,
      "step": 9000
    },
    {
      "epoch": 0.51,
      "learning_rate": 2.6968057921635438e-05,
      "loss": 3.8074,
      "step": 9500
    },
    {
      "epoch": 0.53,
      "learning_rate": 2.6808347529812607e-05,
      "loss": 3.8163,
      "step": 10000
    },
    {
      "epoch": 0.56,
      "learning_rate": 2.664863713798978e-05,
      "loss": 3.8075,
      "step": 10500
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.648892674616695e-05,
      "loss": 3.7558,
      "step": 11000
    },
    {
      "epoch": 0.61,
      "learning_rate": 2.6329216354344123e-05,
      "loss": 3.7775,
      "step": 11500
    },
    {
      "epoch": 0.64,
      "learning_rate": 2.616982538330494e-05,
      "loss": 3.7994,
      "step": 12000
    },
    {
      "epoch": 0.67,
      "learning_rate": 2.6010114991482113e-05,
      "loss": 3.7975,
      "step": 12500
    },
    {
      "epoch": 0.69,
      "learning_rate": 2.5850404599659286e-05,
      "loss": 3.7754,
      "step": 13000
    },
    {
      "epoch": 0.72,
      "learning_rate": 2.569069420783646e-05,
      "loss": 3.7374,
      "step": 13500
    },
    {
      "epoch": 0.75,
      "learning_rate": 2.5531303236797275e-05,
      "loss": 3.7527,
      "step": 14000
    },
    {
      "epoch": 0.77,
      "learning_rate": 2.5371592844974445e-05,
      "loss": 3.7226,
      "step": 14500
    },
    {
      "epoch": 0.8,
      "learning_rate": 2.521188245315162e-05,
      "loss": 3.8012,
      "step": 15000
    },
    {
      "epoch": 0.83,
      "learning_rate": 2.505217206132879e-05,
      "loss": 3.7938,
      "step": 15500
    },
    {
      "epoch": 0.85,
      "learning_rate": 2.4892781090289607e-05,
      "loss": 3.7989,
      "step": 16000
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.4733070698466784e-05,
      "loss": 3.7258,
      "step": 16500
    },
    {
      "epoch": 0.91,
      "learning_rate": 2.4573360306643953e-05,
      "loss": 3.7496,
      "step": 17000
    },
    {
      "epoch": 0.93,
      "learning_rate": 2.4413649914821123e-05,
      "loss": 3.7927,
      "step": 17500
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.4254258943781943e-05,
      "loss": 3.7524,
      "step": 18000
    },
    {
      "epoch": 0.98,
      "learning_rate": 2.4094548551959116e-05,
      "loss": 3.7317,
      "step": 18500
    },
    {
      "epoch": 1.0,
      "eval_gen_len": 5.9904,
      "eval_loss": 4.179744243621826,
      "eval_rouge1": 12.674,
      "eval_rouge2": 2.9346,
      "eval_rougeL": 12.6863,
      "eval_rougeLsum": 12.6925,
      "eval_runtime": 104.7366,
      "eval_samples_per_second": 72.563,
      "eval_steps_per_second": 9.07,
      "step": 18784
    },
    {
      "epoch": 1.01,
      "learning_rate": 2.3934838160136285e-05,
      "loss": 3.7256,
      "step": 19000
    },
    {
      "epoch": 1.04,
      "learning_rate": 2.377512776831346e-05,
      "loss": 3.7268,
      "step": 19500
    },
    {
      "epoch": 1.06,
      "learning_rate": 2.3615736797274278e-05,
      "loss": 3.7327,
      "step": 20000
    },
    {
      "epoch": 1.09,
      "learning_rate": 2.3456026405451448e-05,
      "loss": 3.7136,
      "step": 20500
    },
    {
      "epoch": 1.12,
      "learning_rate": 2.329631601362862e-05,
      "loss": 3.708,
      "step": 21000
    },
    {
      "epoch": 1.14,
      "learning_rate": 2.313660562180579e-05,
      "loss": 3.6911,
      "step": 21500
    },
    {
      "epoch": 1.17,
      "learning_rate": 2.297721465076661e-05,
      "loss": 3.6855,
      "step": 22000
    },
    {
      "epoch": 1.2,
      "learning_rate": 2.2817504258943783e-05,
      "loss": 3.7239,
      "step": 22500
    },
    {
      "epoch": 1.22,
      "learning_rate": 2.2657793867120953e-05,
      "loss": 3.6898,
      "step": 23000
    },
    {
      "epoch": 1.25,
      "learning_rate": 2.249808347529813e-05,
      "loss": 3.6916,
      "step": 23500
    },
    {
      "epoch": 1.28,
      "learning_rate": 2.2338692504258946e-05,
      "loss": 3.7224,
      "step": 24000
    },
    {
      "epoch": 1.3,
      "learning_rate": 2.2178982112436115e-05,
      "loss": 3.7074,
      "step": 24500
    },
    {
      "epoch": 1.33,
      "learning_rate": 2.201927172061329e-05,
      "loss": 3.6832,
      "step": 25000
    },
    {
      "epoch": 1.36,
      "learning_rate": 2.185956132879046e-05,
      "loss": 3.6984,
      "step": 25500
    },
    {
      "epoch": 1.38,
      "learning_rate": 2.1700170357751278e-05,
      "loss": 3.617,
      "step": 26000
    },
    {
      "epoch": 1.41,
      "learning_rate": 2.154045996592845e-05,
      "loss": 3.6604,
      "step": 26500
    },
    {
      "epoch": 1.44,
      "learning_rate": 2.1380749574105624e-05,
      "loss": 3.666,
      "step": 27000
    },
    {
      "epoch": 1.46,
      "learning_rate": 2.1221039182282794e-05,
      "loss": 3.6795,
      "step": 27500
    },
    {
      "epoch": 1.49,
      "learning_rate": 2.1061648211243613e-05,
      "loss": 3.7,
      "step": 28000
    },
    {
      "epoch": 1.52,
      "learning_rate": 2.0901937819420783e-05,
      "loss": 3.7005,
      "step": 28500
    },
    {
      "epoch": 1.54,
      "learning_rate": 2.0742227427597956e-05,
      "loss": 3.6941,
      "step": 29000
    },
    {
      "epoch": 1.57,
      "learning_rate": 2.058251703577513e-05,
      "loss": 3.6645,
      "step": 29500
    },
    {
      "epoch": 1.6,
      "learning_rate": 2.0423126064735945e-05,
      "loss": 3.6256,
      "step": 30000
    },
    {
      "epoch": 1.62,
      "learning_rate": 2.026341567291312e-05,
      "loss": 3.6873,
      "step": 30500
    },
    {
      "epoch": 1.65,
      "learning_rate": 2.010370528109029e-05,
      "loss": 3.6912,
      "step": 31000
    },
    {
      "epoch": 1.68,
      "learning_rate": 1.994399488926746e-05,
      "loss": 3.6764,
      "step": 31500
    },
    {
      "epoch": 1.7,
      "learning_rate": 1.9784603918228277e-05,
      "loss": 3.6543,
      "step": 32000
    },
    {
      "epoch": 1.73,
      "learning_rate": 1.9624893526405454e-05,
      "loss": 3.6974,
      "step": 32500
    },
    {
      "epoch": 1.76,
      "learning_rate": 1.9465183134582624e-05,
      "loss": 3.6267,
      "step": 33000
    },
    {
      "epoch": 1.78,
      "learning_rate": 1.9305472742759797e-05,
      "loss": 3.7287,
      "step": 33500
    },
    {
      "epoch": 1.81,
      "learning_rate": 1.9146081771720616e-05,
      "loss": 3.6518,
      "step": 34000
    },
    {
      "epoch": 1.84,
      "learning_rate": 1.8986371379897786e-05,
      "loss": 3.659,
      "step": 34500
    },
    {
      "epoch": 1.86,
      "learning_rate": 1.8826660988074956e-05,
      "loss": 3.6865,
      "step": 35000
    },
    {
      "epoch": 1.89,
      "learning_rate": 1.866695059625213e-05,
      "loss": 3.649,
      "step": 35500
    },
    {
      "epoch": 1.92,
      "learning_rate": 1.850755962521295e-05,
      "loss": 3.69,
      "step": 36000
    },
    {
      "epoch": 1.94,
      "learning_rate": 1.8347849233390118e-05,
      "loss": 3.6672,
      "step": 36500
    },
    {
      "epoch": 1.97,
      "learning_rate": 1.818813884156729e-05,
      "loss": 3.6634,
      "step": 37000
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.8028428449744464e-05,
      "loss": 3.6919,
      "step": 37500
    },
    {
      "epoch": 2.0,
      "eval_gen_len": 6.0964,
      "eval_loss": 4.154943943023682,
      "eval_rouge1": 12.7072,
      "eval_rouge2": 2.9431,
      "eval_rougeL": 12.7199,
      "eval_rougeLsum": 12.7279,
      "eval_runtime": 105.4406,
      "eval_samples_per_second": 72.079,
      "eval_steps_per_second": 9.01,
      "step": 37568
    },
    {
      "epoch": 2.02,
      "learning_rate": 1.786903747870528e-05,
      "loss": 3.6437,
      "step": 38000
    },
    {
      "epoch": 2.05,
      "learning_rate": 1.7709327086882454e-05,
      "loss": 3.6416,
      "step": 38500
    },
    {
      "epoch": 2.08,
      "learning_rate": 1.7549616695059623e-05,
      "loss": 3.6792,
      "step": 39000
    },
    {
      "epoch": 2.1,
      "learning_rate": 1.73899063032368e-05,
      "loss": 3.6606,
      "step": 39500
    },
    {
      "epoch": 2.13,
      "learning_rate": 1.7230515332197616e-05,
      "loss": 3.6146,
      "step": 40000
    },
    {
      "epoch": 2.16,
      "learning_rate": 1.7070804940374786e-05,
      "loss": 3.6238,
      "step": 40500
    },
    {
      "epoch": 2.18,
      "learning_rate": 1.6911094548551962e-05,
      "loss": 3.6344,
      "step": 41000
    },
    {
      "epoch": 2.21,
      "learning_rate": 1.6751384156729132e-05,
      "loss": 3.6348,
      "step": 41500
    },
    {
      "epoch": 2.24,
      "learning_rate": 1.6591993185689948e-05,
      "loss": 3.6357,
      "step": 42000
    },
    {
      "epoch": 2.26,
      "learning_rate": 1.643228279386712e-05,
      "loss": 3.6303,
      "step": 42500
    },
    {
      "epoch": 2.29,
      "learning_rate": 1.6272572402044294e-05,
      "loss": 3.6216,
      "step": 43000
    },
    {
      "epoch": 2.32,
      "learning_rate": 1.6112862010221464e-05,
      "loss": 3.6247,
      "step": 43500
    },
    {
      "epoch": 2.34,
      "learning_rate": 1.5953471039182283e-05,
      "loss": 3.6372,
      "step": 44000
    },
    {
      "epoch": 2.37,
      "learning_rate": 1.5793760647359457e-05,
      "loss": 3.6249,
      "step": 44500
    },
    {
      "epoch": 2.4,
      "learning_rate": 1.5634050255536626e-05,
      "loss": 3.6202,
      "step": 45000
    },
    {
      "epoch": 2.42,
      "learning_rate": 1.54743398637138e-05,
      "loss": 3.6435,
      "step": 45500
    },
    {
      "epoch": 2.45,
      "learning_rate": 1.531494889267462e-05,
      "loss": 3.6407,
      "step": 46000
    },
    {
      "epoch": 2.48,
      "learning_rate": 1.5155238500851789e-05,
      "loss": 3.6552,
      "step": 46500
    },
    {
      "epoch": 2.5,
      "learning_rate": 1.4995528109028962e-05,
      "loss": 3.6302,
      "step": 47000
    },
    {
      "epoch": 2.53,
      "learning_rate": 1.4835817717206133e-05,
      "loss": 3.5919,
      "step": 47500
    },
    {
      "epoch": 2.56,
      "learning_rate": 1.4676426746166951e-05,
      "loss": 3.5796,
      "step": 48000
    },
    {
      "epoch": 2.58,
      "learning_rate": 1.4516716354344122e-05,
      "loss": 3.6223,
      "step": 48500
    },
    {
      "epoch": 2.61,
      "learning_rate": 1.4357005962521294e-05,
      "loss": 3.5963,
      "step": 49000
    },
    {
      "epoch": 2.64,
      "learning_rate": 1.4197295570698467e-05,
      "loss": 3.6373,
      "step": 49500
    },
    {
      "epoch": 2.66,
      "learning_rate": 1.4037904599659285e-05,
      "loss": 3.5909,
      "step": 50000
    },
    {
      "epoch": 2.69,
      "learning_rate": 1.3878194207836456e-05,
      "loss": 3.6421,
      "step": 50500
    },
    {
      "epoch": 2.72,
      "learning_rate": 1.371848381601363e-05,
      "loss": 3.5948,
      "step": 51000
    },
    {
      "epoch": 2.74,
      "learning_rate": 1.35587734241908e-05,
      "loss": 3.6234,
      "step": 51500
    },
    {
      "epoch": 2.77,
      "learning_rate": 1.3399382453151619e-05,
      "loss": 3.6113,
      "step": 52000
    },
    {
      "epoch": 2.79,
      "learning_rate": 1.323967206132879e-05,
      "loss": 3.5963,
      "step": 52500
    },
    {
      "epoch": 2.82,
      "learning_rate": 1.3079961669505963e-05,
      "loss": 3.628,
      "step": 53000
    },
    {
      "epoch": 2.85,
      "learning_rate": 1.2920251277683136e-05,
      "loss": 3.6178,
      "step": 53500
    },
    {
      "epoch": 2.87,
      "learning_rate": 1.2760860306643952e-05,
      "loss": 3.6027,
      "step": 54000
    },
    {
      "epoch": 2.9,
      "learning_rate": 1.2601149914821126e-05,
      "loss": 3.6703,
      "step": 54500
    },
    {
      "epoch": 2.93,
      "learning_rate": 1.2441439522998297e-05,
      "loss": 3.6103,
      "step": 55000
    },
    {
      "epoch": 2.95,
      "learning_rate": 1.2281729131175468e-05,
      "loss": 3.596,
      "step": 55500
    },
    {
      "epoch": 2.98,
      "learning_rate": 1.2122338160136286e-05,
      "loss": 3.6205,
      "step": 56000
    },
    {
      "epoch": 3.0,
      "eval_gen_len": 6.1543,
      "eval_loss": 4.141897678375244,
      "eval_rouge1": 12.7145,
      "eval_rouge2": 2.93,
      "eval_rougeL": 12.7274,
      "eval_rougeLsum": 12.7376,
      "eval_runtime": 108.5251,
      "eval_samples_per_second": 70.03,
      "eval_steps_per_second": 8.754,
      "step": 56352
    },
    {
      "epoch": 3.01,
      "learning_rate": 1.196262776831346e-05,
      "loss": 3.6473,
      "step": 56500
    },
    {
      "epoch": 3.03,
      "learning_rate": 1.180291737649063e-05,
      "loss": 3.5711,
      "step": 57000
    },
    {
      "epoch": 3.06,
      "learning_rate": 1.1643206984667802e-05,
      "loss": 3.6248,
      "step": 57500
    },
    {
      "epoch": 3.09,
      "learning_rate": 1.148381601362862e-05,
      "loss": 3.6218,
      "step": 58000
    },
    {
      "epoch": 3.11,
      "learning_rate": 1.1324105621805791e-05,
      "loss": 3.6035,
      "step": 58500
    },
    {
      "epoch": 3.14,
      "learning_rate": 1.1164395229982965e-05,
      "loss": 3.6007,
      "step": 59000
    },
    {
      "epoch": 3.17,
      "learning_rate": 1.1004684838160136e-05,
      "loss": 3.6097,
      "step": 59500
    },
    {
      "epoch": 3.19,
      "learning_rate": 1.0845293867120954e-05,
      "loss": 3.5995,
      "step": 60000
    },
    {
      "epoch": 3.22,
      "learning_rate": 1.0685583475298127e-05,
      "loss": 3.5552,
      "step": 60500
    },
    {
      "epoch": 3.25,
      "learning_rate": 1.0525873083475298e-05,
      "loss": 3.5995,
      "step": 61000
    },
    {
      "epoch": 3.27,
      "learning_rate": 1.0366162691652471e-05,
      "loss": 3.6177,
      "step": 61500
    },
    {
      "epoch": 3.3,
      "learning_rate": 1.0206771720613288e-05,
      "loss": 3.6039,
      "step": 62000
    },
    {
      "epoch": 3.33,
      "learning_rate": 1.004706132879046e-05,
      "loss": 3.6237,
      "step": 62500
    },
    {
      "epoch": 3.35,
      "learning_rate": 9.887350936967632e-06,
      "loss": 3.5536,
      "step": 63000
    },
    {
      "epoch": 3.38,
      "learning_rate": 9.727640545144804e-06,
      "loss": 3.6287,
      "step": 63500
    },
    {
      "epoch": 3.41,
      "learning_rate": 9.568249574105623e-06,
      "loss": 3.6013,
      "step": 64000
    },
    {
      "epoch": 3.43,
      "learning_rate": 9.408539182282794e-06,
      "loss": 3.6343,
      "step": 64500
    },
    {
      "epoch": 3.46,
      "learning_rate": 9.248828790459966e-06,
      "loss": 3.6494,
      "step": 65000
    },
    {
      "epoch": 3.49,
      "learning_rate": 9.089118398637137e-06,
      "loss": 3.6276,
      "step": 65500
    },
    {
      "epoch": 3.51,
      "learning_rate": 8.929727427597955e-06,
      "loss": 3.606,
      "step": 66000
    },
    {
      "epoch": 3.54,
      "learning_rate": 8.770017035775128e-06,
      "loss": 3.5757,
      "step": 66500
    },
    {
      "epoch": 3.57,
      "learning_rate": 8.6103066439523e-06,
      "loss": 3.5841,
      "step": 67000
    },
    {
      "epoch": 3.59,
      "learning_rate": 8.450596252129473e-06,
      "loss": 3.5987,
      "step": 67500
    },
    {
      "epoch": 3.62,
      "learning_rate": 8.291205281090289e-06,
      "loss": 3.5761,
      "step": 68000
    },
    {
      "epoch": 3.65,
      "learning_rate": 8.131494889267462e-06,
      "loss": 3.5931,
      "step": 68500
    },
    {
      "epoch": 3.67,
      "learning_rate": 7.971784497444633e-06,
      "loss": 3.5937,
      "step": 69000
    },
    {
      "epoch": 3.7,
      "learning_rate": 7.812074105621807e-06,
      "loss": 3.5696,
      "step": 69500
    },
    {
      "epoch": 3.73,
      "learning_rate": 7.652683134582624e-06,
      "loss": 3.6207,
      "step": 70000
    },
    {
      "epoch": 3.75,
      "learning_rate": 7.492972742759796e-06,
      "loss": 3.5782,
      "step": 70500
    },
    {
      "epoch": 3.78,
      "learning_rate": 7.333262350936967e-06,
      "loss": 3.5555,
      "step": 71000
    },
    {
      "epoch": 3.81,
      "learning_rate": 7.1735519591141395e-06,
      "loss": 3.5753,
      "step": 71500
    },
    {
      "epoch": 3.83,
      "learning_rate": 7.014160988074958e-06,
      "loss": 3.5215,
      "step": 72000
    },
    {
      "epoch": 3.86,
      "learning_rate": 6.85445059625213e-06,
      "loss": 3.5889,
      "step": 72500
    },
    {
      "epoch": 3.89,
      "learning_rate": 6.694740204429302e-06,
      "loss": 3.5766,
      "step": 73000
    },
    {
      "epoch": 3.91,
      "learning_rate": 6.535029812606473e-06,
      "loss": 3.5778,
      "step": 73500
    },
    {
      "epoch": 3.94,
      "learning_rate": 6.375638841567291e-06,
      "loss": 3.6086,
      "step": 74000
    },
    {
      "epoch": 3.97,
      "learning_rate": 6.2159284497444634e-06,
      "loss": 3.5562,
      "step": 74500
    },
    {
      "epoch": 3.99,
      "learning_rate": 6.056218057921636e-06,
      "loss": 3.5603,
      "step": 75000
    },
    {
      "epoch": 4.0,
      "eval_gen_len": 6.1403,
      "eval_loss": 4.137481212615967,
      "eval_rouge1": 12.6976,
      "eval_rouge2": 2.9239,
      "eval_rougeL": 12.7112,
      "eval_rougeLsum": 12.7168,
      "eval_runtime": 106.3438,
      "eval_samples_per_second": 71.466,
      "eval_steps_per_second": 8.933,
      "step": 75136
    },
    {
      "epoch": 4.02,
      "learning_rate": 5.896507666098808e-06,
      "loss": 3.5449,
      "step": 75500
    },
    {
      "epoch": 4.05,
      "learning_rate": 5.737116695059626e-06,
      "loss": 3.5331,
      "step": 76000
    },
    {
      "epoch": 4.07,
      "learning_rate": 5.577406303236797e-06,
      "loss": 3.5443,
      "step": 76500
    },
    {
      "epoch": 4.1,
      "learning_rate": 5.4176959114139695e-06,
      "loss": 3.5557,
      "step": 77000
    },
    {
      "epoch": 4.13,
      "learning_rate": 5.257985519591142e-06,
      "loss": 3.6073,
      "step": 77500
    },
    {
      "epoch": 4.15,
      "learning_rate": 5.098594548551959e-06,
      "loss": 3.5791,
      "step": 78000
    },
    {
      "epoch": 4.18,
      "learning_rate": 4.938884156729132e-06,
      "loss": 3.6755,
      "step": 78500
    },
    {
      "epoch": 4.21,
      "learning_rate": 4.779173764906303e-06,
      "loss": 3.5966,
      "step": 79000
    },
    {
      "epoch": 4.23,
      "learning_rate": 4.6194633730834755e-06,
      "loss": 3.5897,
      "step": 79500
    },
    {
      "epoch": 4.26,
      "learning_rate": 4.459752981260648e-06,
      "loss": 3.5891,
      "step": 80000
    },
    {
      "epoch": 4.29,
      "learning_rate": 4.300362010221465e-06,
      "loss": 3.5978,
      "step": 80500
    },
    {
      "epoch": 4.31,
      "learning_rate": 4.140651618398637e-06,
      "loss": 3.5899,
      "step": 81000
    },
    {
      "epoch": 4.34,
      "learning_rate": 3.980941226575809e-06,
      "loss": 3.5805,
      "step": 81500
    },
    {
      "epoch": 4.37,
      "learning_rate": 3.8212308347529816e-06,
      "loss": 3.5577,
      "step": 82000
    },
    {
      "epoch": 4.39,
      "learning_rate": 3.6618398637137994e-06,
      "loss": 3.5753,
      "step": 82500
    },
    {
      "epoch": 4.42,
      "learning_rate": 3.5021294718909713e-06,
      "loss": 3.576,
      "step": 83000
    },
    {
      "epoch": 4.45,
      "learning_rate": 3.342419080068143e-06,
      "loss": 3.5951,
      "step": 83500
    },
    {
      "epoch": 4.47,
      "learning_rate": 3.182708688245315e-06,
      "loss": 3.5792,
      "step": 84000
    },
    {
      "epoch": 4.5,
      "learning_rate": 3.0233177172061332e-06,
      "loss": 3.5824,
      "step": 84500
    },
    {
      "epoch": 4.53,
      "learning_rate": 2.863607325383305e-06,
      "loss": 3.5487,
      "step": 85000
    },
    {
      "epoch": 4.55,
      "learning_rate": 2.703896933560477e-06,
      "loss": 3.5632,
      "step": 85500
    },
    {
      "epoch": 4.58,
      "learning_rate": 2.544186541737649e-06,
      "loss": 3.6142,
      "step": 86000
    },
    {
      "epoch": 4.6,
      "learning_rate": 2.384795570698467e-06,
      "loss": 3.4888,
      "step": 86500
    },
    {
      "epoch": 4.63,
      "learning_rate": 2.225085178875639e-06,
      "loss": 3.5823,
      "step": 87000
    },
    {
      "epoch": 4.66,
      "learning_rate": 2.0653747870528107e-06,
      "loss": 3.5525,
      "step": 87500
    },
    {
      "epoch": 4.68,
      "learning_rate": 1.9056643952299831e-06,
      "loss": 3.592,
      "step": 88000
    },
    {
      "epoch": 4.71,
      "learning_rate": 1.7462734241908006e-06,
      "loss": 3.5657,
      "step": 88500
    },
    {
      "epoch": 4.74,
      "learning_rate": 1.5865630323679728e-06,
      "loss": 3.5557,
      "step": 89000
    },
    {
      "epoch": 4.76,
      "learning_rate": 1.4268526405451449e-06,
      "loss": 3.5917,
      "step": 89500
    },
    {
      "epoch": 4.79,
      "learning_rate": 1.267142248722317e-06,
      "loss": 3.6155,
      "step": 90000
    },
    {
      "epoch": 4.82,
      "learning_rate": 1.1077512776831346e-06,
      "loss": 3.5609,
      "step": 90500
    },
    {
      "epoch": 4.84,
      "learning_rate": 9.480408858603066e-07,
      "loss": 3.5539,
      "step": 91000
    },
    {
      "epoch": 4.87,
      "learning_rate": 7.883304940374787e-07,
      "loss": 3.5676,
      "step": 91500
    },
    {
      "epoch": 4.9,
      "learning_rate": 6.286201022146508e-07,
      "loss": 3.6031,
      "step": 92000
    },
    {
      "epoch": 4.92,
      "learning_rate": 4.692291311754685e-07,
      "loss": 3.5581,
      "step": 92500
    },
    {
      "epoch": 4.95,
      "learning_rate": 3.095187393526406e-07,
      "loss": 3.5873,
      "step": 93000
    },
    {
      "epoch": 4.98,
      "learning_rate": 1.4980834752981263e-07,
      "loss": 3.5911,
      "step": 93500
    }
  ],
  "max_steps": 93920,
  "num_train_epochs": 5,
  "total_flos": 1.7064702150967296e+16,
  "trial_name": null,
  "trial_params": null
}