|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0023866348448687,
  "eval_steps": 27,
  "global_step": 105,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00954653937947494,
      "grad_norm": 0.5416718125343323,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.8067,
      "step": 1
    },
    {
      "epoch": 0.00954653937947494,
      "eval_loss": 0.8745781183242798,
      "eval_runtime": 23.7103,
      "eval_samples_per_second": 7.465,
      "eval_steps_per_second": 0.97,
      "step": 1
    },
    {
      "epoch": 0.01909307875894988,
      "grad_norm": 0.34453752636909485,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.8173,
      "step": 2
    },
    {
      "epoch": 0.028639618138424822,
      "grad_norm": 0.4350746273994446,
      "learning_rate": 6e-06,
      "loss": 0.8136,
      "step": 3
    },
    {
      "epoch": 0.03818615751789976,
      "grad_norm": 0.32216551899909973,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.8387,
      "step": 4
    },
    {
      "epoch": 0.0477326968973747,
      "grad_norm": 0.6193002462387085,
      "learning_rate": 1e-05,
      "loss": 0.8368,
      "step": 5
    },
    {
      "epoch": 0.057279236276849645,
      "grad_norm": 0.3906833529472351,
      "learning_rate": 1.2e-05,
      "loss": 0.8408,
      "step": 6
    },
    {
      "epoch": 0.06682577565632458,
      "grad_norm": 0.43147146701812744,
      "learning_rate": 1.4000000000000001e-05,
      "loss": 0.8195,
      "step": 7
    },
    {
      "epoch": 0.07637231503579953,
      "grad_norm": 0.300281286239624,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.8286,
      "step": 8
    },
    {
      "epoch": 0.08591885441527446,
      "grad_norm": 0.36642926931381226,
      "learning_rate": 1.8e-05,
      "loss": 0.9017,
      "step": 9
    },
    {
      "epoch": 0.0954653937947494,
      "grad_norm": 0.39945727586746216,
      "learning_rate": 2e-05,
      "loss": 1.0405,
      "step": 10
    },
    {
      "epoch": 0.10501193317422435,
      "grad_norm": 0.4177384078502655,
      "learning_rate": 2.2000000000000003e-05,
      "loss": 0.7997,
      "step": 11
    },
    {
      "epoch": 0.11455847255369929,
      "grad_norm": 0.6147989630699158,
      "learning_rate": 2.4e-05,
      "loss": 0.8825,
      "step": 12
    },
    {
      "epoch": 0.12410501193317422,
      "grad_norm": 0.29250773787498474,
      "learning_rate": 2.6000000000000002e-05,
      "loss": 0.8231,
      "step": 13
    },
    {
      "epoch": 0.13365155131264916,
      "grad_norm": 0.4127359092235565,
      "learning_rate": 2.8000000000000003e-05,
      "loss": 0.8333,
      "step": 14
    },
    {
      "epoch": 0.1431980906921241,
      "grad_norm": 0.3746739327907562,
      "learning_rate": 3e-05,
      "loss": 0.9095,
      "step": 15
    },
    {
      "epoch": 0.15274463007159905,
      "grad_norm": 0.4909857213497162,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.9133,
      "step": 16
    },
    {
      "epoch": 0.162291169451074,
      "grad_norm": 0.357906699180603,
      "learning_rate": 3.4000000000000007e-05,
      "loss": 0.8572,
      "step": 17
    },
    {
      "epoch": 0.1718377088305489,
      "grad_norm": 0.533477783203125,
      "learning_rate": 3.6e-05,
      "loss": 0.9061,
      "step": 18
    },
    {
      "epoch": 0.18138424821002386,
      "grad_norm": 0.5812142491340637,
      "learning_rate": 3.8e-05,
      "loss": 0.7458,
      "step": 19
    },
    {
      "epoch": 0.1909307875894988,
      "grad_norm": 0.7447901964187622,
      "learning_rate": 4e-05,
      "loss": 0.7656,
      "step": 20
    },
    {
      "epoch": 0.20047732696897375,
      "grad_norm": 0.5314919948577881,
      "learning_rate": 4.2e-05,
      "loss": 0.7249,
      "step": 21
    },
    {
      "epoch": 0.2100238663484487,
      "grad_norm": 0.6344147324562073,
      "learning_rate": 4.4000000000000006e-05,
      "loss": 0.7594,
      "step": 22
    },
    {
      "epoch": 0.21957040572792363,
      "grad_norm": 0.3820248246192932,
      "learning_rate": 4.600000000000001e-05,
      "loss": 0.7272,
      "step": 23
    },
    {
      "epoch": 0.22911694510739858,
      "grad_norm": 0.47990524768829346,
      "learning_rate": 4.8e-05,
      "loss": 0.7684,
      "step": 24
    },
    {
      "epoch": 0.2386634844868735,
      "grad_norm": 0.5735926032066345,
      "learning_rate": 5e-05,
      "loss": 0.7445,
      "step": 25
    },
    {
      "epoch": 0.24821002386634844,
      "grad_norm": 0.3894128203392029,
      "learning_rate": 5.2000000000000004e-05,
      "loss": 0.8173,
      "step": 26
    },
    {
      "epoch": 0.2577565632458234,
      "grad_norm": 0.46286776661872864,
      "learning_rate": 5.4000000000000005e-05,
      "loss": 0.7138,
      "step": 27
    },
    {
      "epoch": 0.2577565632458234,
      "eval_loss": 0.7636813521385193,
      "eval_runtime": 23.8391,
      "eval_samples_per_second": 7.425,
      "eval_steps_per_second": 0.965,
      "step": 27
    },
    {
      "epoch": 0.26730310262529833,
      "grad_norm": 0.35924747586250305,
      "learning_rate": 5.6000000000000006e-05,
      "loss": 0.7682,
      "step": 28
    },
    {
      "epoch": 0.27684964200477324,
      "grad_norm": 0.8549047708511353,
      "learning_rate": 5.8e-05,
      "loss": 0.7115,
      "step": 29
    },
    {
      "epoch": 0.2863961813842482,
      "grad_norm": 0.39896008372306824,
      "learning_rate": 6e-05,
      "loss": 0.8621,
      "step": 30
    },
    {
      "epoch": 0.29594272076372313,
      "grad_norm": 0.4116915464401245,
      "learning_rate": 6.2e-05,
      "loss": 0.682,
      "step": 31
    },
    {
      "epoch": 0.3054892601431981,
      "grad_norm": 0.33658358454704285,
      "learning_rate": 6.400000000000001e-05,
      "loss": 0.7713,
      "step": 32
    },
    {
      "epoch": 0.315035799522673,
      "grad_norm": 0.7733840346336365,
      "learning_rate": 6.6e-05,
      "loss": 0.683,
      "step": 33
    },
    {
      "epoch": 0.324582338902148,
      "grad_norm": 0.38960912823677063,
      "learning_rate": 6.800000000000001e-05,
      "loss": 0.6601,
      "step": 34
    },
    {
      "epoch": 0.3341288782816229,
      "grad_norm": 0.5905539989471436,
      "learning_rate": 7e-05,
      "loss": 0.7086,
      "step": 35
    },
    {
      "epoch": 0.3436754176610978,
      "grad_norm": 0.5276817083358765,
      "learning_rate": 7.2e-05,
      "loss": 0.7206,
      "step": 36
    },
    {
      "epoch": 0.3532219570405728,
      "grad_norm": 0.5476186275482178,
      "learning_rate": 7.4e-05,
      "loss": 0.7032,
      "step": 37
    },
    {
      "epoch": 0.3627684964200477,
      "grad_norm": 0.5326115489006042,
      "learning_rate": 7.6e-05,
      "loss": 0.734,
      "step": 38
    },
    {
      "epoch": 0.3723150357995227,
      "grad_norm": 0.3078496754169464,
      "learning_rate": 7.800000000000001e-05,
      "loss": 0.7866,
      "step": 39
    },
    {
      "epoch": 0.3818615751789976,
      "grad_norm": 0.269064724445343,
      "learning_rate": 8e-05,
      "loss": 0.7215,
      "step": 40
    },
    {
      "epoch": 0.3914081145584726,
      "grad_norm": 0.30524760484695435,
      "learning_rate": 8.2e-05,
      "loss": 0.7716,
      "step": 41
    },
    {
      "epoch": 0.4009546539379475,
      "grad_norm": 0.29701563715934753,
      "learning_rate": 8.4e-05,
      "loss": 0.7049,
      "step": 42
    },
    {
      "epoch": 0.4105011933174224,
      "grad_norm": 0.30442681908607483,
      "learning_rate": 8.6e-05,
      "loss": 0.7702,
      "step": 43
    },
    {
      "epoch": 0.4200477326968974,
      "grad_norm": 0.2677440345287323,
      "learning_rate": 8.800000000000001e-05,
      "loss": 0.6415,
      "step": 44
    },
    {
      "epoch": 0.4295942720763723,
      "grad_norm": 0.4278031289577484,
      "learning_rate": 9e-05,
      "loss": 0.681,
      "step": 45
    },
    {
      "epoch": 0.43914081145584727,
      "grad_norm": 0.30344948172569275,
      "learning_rate": 9.200000000000001e-05,
      "loss": 0.7682,
      "step": 46
    },
    {
      "epoch": 0.4486873508353222,
      "grad_norm": 0.28526848554611206,
      "learning_rate": 9.4e-05,
      "loss": 0.6851,
      "step": 47
    },
    {
      "epoch": 0.45823389021479716,
      "grad_norm": 0.2424936592578888,
      "learning_rate": 9.6e-05,
      "loss": 0.6337,
      "step": 48
    },
    {
      "epoch": 0.4677804295942721,
      "grad_norm": 0.44285890460014343,
      "learning_rate": 9.8e-05,
      "loss": 0.7426,
      "step": 49
    },
    {
      "epoch": 0.477326968973747,
      "grad_norm": 0.7494658827781677,
      "learning_rate": 0.0001,
      "loss": 0.7526,
      "step": 50
    },
    {
      "epoch": 0.48687350835322196,
      "grad_norm": 0.359139621257782,
      "learning_rate": 9.991845519630678e-05,
      "loss": 0.7343,
      "step": 51
    },
    {
      "epoch": 0.4964200477326969,
      "grad_norm": 0.3556593358516693,
      "learning_rate": 9.967408676742751e-05,
      "loss": 0.7711,
      "step": 52
    },
    {
      "epoch": 0.5059665871121718,
      "grad_norm": 0.3111443817615509,
      "learning_rate": 9.926769179238466e-05,
      "loss": 0.7054,
      "step": 53
    },
    {
      "epoch": 0.5155131264916468,
      "grad_norm": 0.3471980690956116,
      "learning_rate": 9.870059584711668e-05,
      "loss": 0.7179,
      "step": 54
    },
    {
      "epoch": 0.5155131264916468,
      "eval_loss": 0.7270187139511108,
      "eval_runtime": 23.8463,
      "eval_samples_per_second": 7.423,
      "eval_steps_per_second": 0.965,
      "step": 54
    },
    {
      "epoch": 0.5250596658711217,
      "grad_norm": 0.3021623492240906,
      "learning_rate": 9.797464868072488e-05,
      "loss": 0.7227,
      "step": 55
    },
    {
      "epoch": 0.5346062052505967,
      "grad_norm": 0.391401082277298,
      "learning_rate": 9.709221818197624e-05,
      "loss": 0.6911,
      "step": 56
    },
    {
      "epoch": 0.5441527446300716,
      "grad_norm": 0.6585782170295715,
      "learning_rate": 9.60561826557425e-05,
      "loss": 0.6337,
      "step": 57
    },
    {
      "epoch": 0.5536992840095465,
      "grad_norm": 0.35819312930107117,
      "learning_rate": 9.486992143456792e-05,
      "loss": 0.8028,
      "step": 58
    },
    {
      "epoch": 0.5632458233890215,
      "grad_norm": 0.34819847345352173,
      "learning_rate": 9.353730385598887e-05,
      "loss": 0.6767,
      "step": 59
    },
    {
      "epoch": 0.5727923627684964,
      "grad_norm": 0.2741661071777344,
      "learning_rate": 9.206267664155907e-05,
      "loss": 0.6573,
      "step": 60
    },
    {
      "epoch": 0.5823389021479713,
      "grad_norm": 0.37481749057769775,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.7067,
      "step": 61
    },
    {
      "epoch": 0.5918854415274463,
      "grad_norm": 0.27204594016075134,
      "learning_rate": 8.870708053195413e-05,
      "loss": 0.6636,
      "step": 62
    },
    {
      "epoch": 0.6014319809069213,
      "grad_norm": 3.2921030521392822,
      "learning_rate": 8.683705689382024e-05,
      "loss": 0.7162,
      "step": 63
    },
    {
      "epoch": 0.6109785202863962,
      "grad_norm": 0.34250608086586,
      "learning_rate": 8.484687843276469e-05,
      "loss": 0.7088,
      "step": 64
    },
    {
      "epoch": 0.6205250596658711,
      "grad_norm": 0.412442147731781,
      "learning_rate": 8.274303669726426e-05,
      "loss": 0.6562,
      "step": 65
    },
    {
      "epoch": 0.630071599045346,
      "grad_norm": 0.2742518186569214,
      "learning_rate": 8.053239398177191e-05,
      "loss": 0.7462,
      "step": 66
    },
    {
      "epoch": 0.639618138424821,
      "grad_norm": 0.28374218940734863,
      "learning_rate": 7.822216094333847e-05,
      "loss": 0.727,
      "step": 67
    },
    {
      "epoch": 0.649164677804296,
      "grad_norm": 0.36977893114089966,
      "learning_rate": 7.58198730819481e-05,
      "loss": 0.7014,
      "step": 68
    },
    {
      "epoch": 0.6587112171837709,
      "grad_norm": 0.3121008574962616,
      "learning_rate": 7.333336616128369e-05,
      "loss": 0.7509,
      "step": 69
    },
    {
      "epoch": 0.6682577565632458,
      "grad_norm": 0.3306713402271271,
      "learning_rate": 7.077075065009433e-05,
      "loss": 0.7309,
      "step": 70
    },
    {
      "epoch": 0.6778042959427207,
      "grad_norm": 0.4980546832084656,
      "learning_rate": 6.814038526753205e-05,
      "loss": 0.7188,
      "step": 71
    },
    {
      "epoch": 0.6873508353221957,
      "grad_norm": 0.36510801315307617,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.7224,
      "step": 72
    },
    {
      "epoch": 0.6968973747016707,
      "grad_norm": 0.3253666162490845,
      "learning_rate": 6.271091670967436e-05,
      "loss": 0.7039,
      "step": 73
    },
    {
      "epoch": 0.7064439140811456,
      "grad_norm": 0.46175265312194824,
      "learning_rate": 5.992952333228728e-05,
      "loss": 0.7333,
      "step": 74
    },
    {
      "epoch": 0.7159904534606205,
      "grad_norm": 0.27166417241096497,
      "learning_rate": 5.7115741913664264e-05,
      "loss": 0.7273,
      "step": 75
    },
    {
      "epoch": 0.7255369928400954,
      "grad_norm": 0.25115492939949036,
      "learning_rate": 5.427875042394199e-05,
      "loss": 0.7164,
      "step": 76
    },
    {
      "epoch": 0.7350835322195705,
      "grad_norm": 0.3494699001312256,
      "learning_rate": 5.142780253968481e-05,
      "loss": 0.8032,
      "step": 77
    },
    {
      "epoch": 0.7446300715990454,
      "grad_norm": 0.31800737977027893,
      "learning_rate": 4.85721974603152e-05,
      "loss": 0.7018,
      "step": 78
    },
    {
      "epoch": 0.7541766109785203,
      "grad_norm": 0.44433706998825073,
      "learning_rate": 4.5721249576058027e-05,
      "loss": 0.7192,
      "step": 79
    },
    {
      "epoch": 0.7637231503579952,
      "grad_norm": 0.33603635430336,
      "learning_rate": 4.288425808633575e-05,
      "loss": 0.66,
      "step": 80
    },
    {
      "epoch": 0.7732696897374701,
      "grad_norm": 0.25413641333580017,
      "learning_rate": 4.007047666771274e-05,
      "loss": 0.5788,
      "step": 81
    },
    {
      "epoch": 0.7732696897374701,
      "eval_loss": 0.7156311273574829,
      "eval_runtime": 24.2989,
      "eval_samples_per_second": 7.284,
      "eval_steps_per_second": 0.947,
      "step": 81
    },
    {
      "epoch": 0.7828162291169452,
      "grad_norm": 0.5405492782592773,
      "learning_rate": 3.728908329032567e-05,
      "loss": 0.7513,
      "step": 82
    },
    {
      "epoch": 0.7923627684964201,
      "grad_norm": 0.33515989780426025,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.6574,
      "step": 83
    },
    {
      "epoch": 0.801909307875895,
      "grad_norm": 0.3084225058555603,
      "learning_rate": 3.1859614732467954e-05,
      "loss": 0.6975,
      "step": 84
    },
    {
      "epoch": 0.8114558472553699,
      "grad_norm": 0.3069574534893036,
      "learning_rate": 2.9229249349905684e-05,
      "loss": 0.6461,
      "step": 85
    },
    {
      "epoch": 0.8210023866348448,
      "grad_norm": 1.0965303182601929,
      "learning_rate": 2.6666633838716314e-05,
      "loss": 0.6629,
      "step": 86
    },
    {
      "epoch": 0.8305489260143198,
      "grad_norm": 0.36803197860717773,
      "learning_rate": 2.418012691805191e-05,
      "loss": 0.7792,
      "step": 87
    },
    {
      "epoch": 0.8400954653937948,
      "grad_norm": 0.37279656529426575,
      "learning_rate": 2.1777839056661554e-05,
      "loss": 0.7039,
      "step": 88
    },
    {
      "epoch": 0.8496420047732697,
      "grad_norm": 0.28656861186027527,
      "learning_rate": 1.946760601822809e-05,
      "loss": 0.7103,
      "step": 89
    },
    {
      "epoch": 0.8591885441527446,
      "grad_norm": 0.805375874042511,
      "learning_rate": 1.725696330273575e-05,
      "loss": 0.7213,
      "step": 90
    },
    {
      "epoch": 0.8687350835322196,
      "grad_norm": 0.3051246404647827,
      "learning_rate": 1.5153121567235335e-05,
      "loss": 0.6856,
      "step": 91
    },
    {
      "epoch": 0.8782816229116945,
      "grad_norm": 0.6019058227539062,
      "learning_rate": 1.3162943106179749e-05,
      "loss": 0.7098,
      "step": 92
    },
    {
      "epoch": 0.8878281622911695,
      "grad_norm": 0.3393521308898926,
      "learning_rate": 1.1292919468045877e-05,
      "loss": 0.8349,
      "step": 93
    },
    {
      "epoch": 0.8973747016706444,
      "grad_norm": 0.2818894386291504,
      "learning_rate": 9.549150281252633e-06,
      "loss": 0.7454,
      "step": 94
    },
    {
      "epoch": 0.9069212410501193,
      "grad_norm": 0.295830637216568,
      "learning_rate": 7.937323358440935e-06,
      "loss": 0.796,
      "step": 95
    },
    {
      "epoch": 0.9164677804295943,
      "grad_norm": 0.3201135993003845,
      "learning_rate": 6.462696144011149e-06,
      "loss": 0.7365,
      "step": 96
    },
    {
      "epoch": 0.9260143198090692,
      "grad_norm": 0.481950968503952,
      "learning_rate": 5.13007856543209e-06,
      "loss": 0.6355,
      "step": 97
    },
    {
      "epoch": 0.9355608591885441,
      "grad_norm": 0.45520639419555664,
      "learning_rate": 3.9438173442575e-06,
      "loss": 0.6937,
      "step": 98
    },
    {
      "epoch": 0.9451073985680191,
      "grad_norm": 0.6622302532196045,
      "learning_rate": 2.9077818180237693e-06,
      "loss": 0.6094,
      "step": 99
    },
    {
      "epoch": 0.954653937947494,
      "grad_norm": 0.4538000822067261,
      "learning_rate": 2.0253513192751373e-06,
      "loss": 0.6478,
      "step": 100
    },
    {
      "epoch": 0.964200477326969,
      "grad_norm": 0.3268328607082367,
      "learning_rate": 1.2994041528833266e-06,
      "loss": 0.6785,
      "step": 101
    },
    {
      "epoch": 0.9737470167064439,
      "grad_norm": 0.24223873019218445,
      "learning_rate": 7.323082076153509e-07,
      "loss": 0.6202,
      "step": 102
    },
    {
      "epoch": 0.9832935560859188,
      "grad_norm": 0.38341718912124634,
      "learning_rate": 3.2591323257248893e-07,
      "loss": 0.7639,
      "step": 103
    },
    {
      "epoch": 0.9928400954653938,
      "grad_norm": 0.32395732402801514,
      "learning_rate": 8.15448036932176e-08,
      "loss": 0.6257,
      "step": 104
    },
    {
      "epoch": 1.0023866348448687,
      "grad_norm": 0.4013065993785858,
      "learning_rate": 0.0,
      "loss": 0.6666,
      "step": 105
    }
  ],
  "logging_steps": 1,
  "max_steps": 105,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.231853181907763e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|