|
{ |
|
"best_metric": 0.07883252203464508, |
|
"best_model_checkpoint": "segformer-teeth_segment_10ep/checkpoint-1040", |
|
"epoch": 24.761904761904763, |
|
"eval_steps": 20, |
|
"global_step": 1040, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.023809523809523808, |
|
"grad_norm": 3.7683873176574707, |
|
"learning_rate": 5.995238095238095e-05, |
|
"loss": 0.7093, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.047619047619047616, |
|
"grad_norm": 3.244874954223633, |
|
"learning_rate": 5.9904761904761905e-05, |
|
"loss": 0.6516, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.07142857142857142, |
|
"grad_norm": 3.2348296642303467, |
|
"learning_rate": 5.9857142857142856e-05, |
|
"loss": 0.6236, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.09523809523809523, |
|
"grad_norm": 3.209193468093872, |
|
"learning_rate": 5.9809523809523814e-05, |
|
"loss": 0.6066, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.11904761904761904, |
|
"grad_norm": 3.0218300819396973, |
|
"learning_rate": 5.9761904761904766e-05, |
|
"loss": 0.5764, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.14285714285714285, |
|
"grad_norm": 2.9458155632019043, |
|
"learning_rate": 5.971428571428572e-05, |
|
"loss": 0.5744, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.16666666666666666, |
|
"grad_norm": 3.0408496856689453, |
|
"learning_rate": 5.966666666666667e-05, |
|
"loss": 0.5581, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.19047619047619047, |
|
"grad_norm": 2.8450467586517334, |
|
"learning_rate": 5.961904761904762e-05, |
|
"loss": 0.5494, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.21428571428571427, |
|
"grad_norm": 2.5434529781341553, |
|
"learning_rate": 5.957142857142857e-05, |
|
"loss": 0.5207, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.23809523809523808, |
|
"grad_norm": 2.4258580207824707, |
|
"learning_rate": 5.9523809523809524e-05, |
|
"loss": 0.5112, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.2619047619047619, |
|
"grad_norm": 2.4317784309387207, |
|
"learning_rate": 5.947619047619048e-05, |
|
"loss": 0.5017, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.2857142857142857, |
|
"grad_norm": 2.8235714435577393, |
|
"learning_rate": 5.9428571428571434e-05, |
|
"loss": 0.5308, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.30952380952380953, |
|
"grad_norm": 2.354302406311035, |
|
"learning_rate": 5.9380952380952385e-05, |
|
"loss": 0.496, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.3333333333333333, |
|
"grad_norm": 2.3225038051605225, |
|
"learning_rate": 5.933333333333334e-05, |
|
"loss": 0.49, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.35714285714285715, |
|
"grad_norm": 2.1641042232513428, |
|
"learning_rate": 5.928571428571429e-05, |
|
"loss": 0.482, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.38095238095238093, |
|
"grad_norm": 2.4745819568634033, |
|
"learning_rate": 5.923809523809524e-05, |
|
"loss": 0.4914, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.40476190476190477, |
|
"grad_norm": 2.144455909729004, |
|
"learning_rate": 5.91904761904762e-05, |
|
"loss": 0.4638, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.42857142857142855, |
|
"grad_norm": 2.0155744552612305, |
|
"learning_rate": 5.914285714285715e-05, |
|
"loss": 0.4498, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.4523809523809524, |
|
"grad_norm": 2.136814594268799, |
|
"learning_rate": 5.90952380952381e-05, |
|
"loss": 0.4435, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.47619047619047616, |
|
"grad_norm": 2.1965997219085693, |
|
"learning_rate": 5.9047619047619046e-05, |
|
"loss": 0.4436, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.47619047619047616, |
|
"eval_dice_score": 0.8391185233064109, |
|
"eval_loss": 0.5746188163757324, |
|
"eval_runtime": 9.622, |
|
"eval_samples_per_second": 9.25, |
|
"eval_steps_per_second": 0.935, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"grad_norm": 1.982168197631836, |
|
"learning_rate": 5.9e-05, |
|
"loss": 0.4275, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.5238095238095238, |
|
"grad_norm": 2.1209304332733154, |
|
"learning_rate": 5.895238095238095e-05, |
|
"loss": 0.447, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.5476190476190477, |
|
"grad_norm": 2.017097234725952, |
|
"learning_rate": 5.89047619047619e-05, |
|
"loss": 0.4322, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.5714285714285714, |
|
"grad_norm": 1.9348403215408325, |
|
"learning_rate": 5.885714285714286e-05, |
|
"loss": 0.4141, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.5952380952380952, |
|
"grad_norm": 2.1177632808685303, |
|
"learning_rate": 5.880952380952381e-05, |
|
"loss": 0.4359, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.6190476190476191, |
|
"grad_norm": 2.4067323207855225, |
|
"learning_rate": 5.876190476190476e-05, |
|
"loss": 0.4383, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.6428571428571429, |
|
"grad_norm": 2.0675342082977295, |
|
"learning_rate": 5.8714285714285714e-05, |
|
"loss": 0.4188, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.6666666666666666, |
|
"grad_norm": 1.8709311485290527, |
|
"learning_rate": 5.8666666666666665e-05, |
|
"loss": 0.4079, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.6904761904761905, |
|
"grad_norm": 2.5608415603637695, |
|
"learning_rate": 5.861904761904762e-05, |
|
"loss": 0.4373, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.7142857142857143, |
|
"grad_norm": 2.411015510559082, |
|
"learning_rate": 5.857142857142857e-05, |
|
"loss": 0.3953, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.7380952380952381, |
|
"grad_norm": 1.9586458206176758, |
|
"learning_rate": 5.8523809523809527e-05, |
|
"loss": 0.3895, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.7619047619047619, |
|
"grad_norm": 2.0025196075439453, |
|
"learning_rate": 5.847619047619048e-05, |
|
"loss": 0.3839, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.7857142857142857, |
|
"grad_norm": 1.836198329925537, |
|
"learning_rate": 5.842857142857143e-05, |
|
"loss": 0.3911, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.8095238095238095, |
|
"grad_norm": 1.9035793542861938, |
|
"learning_rate": 5.838095238095238e-05, |
|
"loss": 0.3941, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.8333333333333334, |
|
"grad_norm": 1.7556637525558472, |
|
"learning_rate": 5.833333333333333e-05, |
|
"loss": 0.3767, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.8571428571428571, |
|
"grad_norm": 2.189488649368286, |
|
"learning_rate": 5.8285714285714284e-05, |
|
"loss": 0.4038, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.8809523809523809, |
|
"grad_norm": 1.6118512153625488, |
|
"learning_rate": 5.8238095238095236e-05, |
|
"loss": 0.3516, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.9047619047619048, |
|
"grad_norm": 1.7305861711502075, |
|
"learning_rate": 5.8190476190476194e-05, |
|
"loss": 0.3606, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.9285714285714286, |
|
"grad_norm": 1.6623855829238892, |
|
"learning_rate": 5.8142857142857146e-05, |
|
"loss": 0.3472, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.9523809523809523, |
|
"grad_norm": 1.6707160472869873, |
|
"learning_rate": 5.80952380952381e-05, |
|
"loss": 0.3473, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.9523809523809523, |
|
"eval_dice_score": 0.8541455296072445, |
|
"eval_loss": 0.43145105242729187, |
|
"eval_runtime": 8.8181, |
|
"eval_samples_per_second": 10.093, |
|
"eval_steps_per_second": 1.021, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.9761904761904762, |
|
"grad_norm": 1.6397485733032227, |
|
"learning_rate": 5.804761904761905e-05, |
|
"loss": 0.342, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"grad_norm": 2.444227457046509, |
|
"learning_rate": 5.8e-05, |
|
"loss": 0.3436, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 1.0238095238095237, |
|
"grad_norm": 1.478361964225769, |
|
"learning_rate": 5.795238095238095e-05, |
|
"loss": 0.3369, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 1.0476190476190477, |
|
"grad_norm": 1.642811894416809, |
|
"learning_rate": 5.790476190476191e-05, |
|
"loss": 0.3313, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 1.0714285714285714, |
|
"grad_norm": 1.4398586750030518, |
|
"learning_rate": 5.785714285714286e-05, |
|
"loss": 0.319, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 1.0952380952380953, |
|
"grad_norm": 1.407219648361206, |
|
"learning_rate": 5.780952380952381e-05, |
|
"loss": 0.3163, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 1.119047619047619, |
|
"grad_norm": 1.5634286403656006, |
|
"learning_rate": 5.7761904761904765e-05, |
|
"loss": 0.3175, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 1.1428571428571428, |
|
"grad_norm": 1.5995242595672607, |
|
"learning_rate": 5.7714285714285716e-05, |
|
"loss": 0.3245, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 1.1666666666666667, |
|
"grad_norm": 1.4526621103286743, |
|
"learning_rate": 5.766666666666667e-05, |
|
"loss": 0.3046, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 1.1904761904761905, |
|
"grad_norm": 2.028379201889038, |
|
"learning_rate": 5.761904761904762e-05, |
|
"loss": 0.3497, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 1.2142857142857142, |
|
"grad_norm": 1.4727197885513306, |
|
"learning_rate": 5.757142857142858e-05, |
|
"loss": 0.3052, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 1.2380952380952381, |
|
"grad_norm": 1.344086766242981, |
|
"learning_rate": 5.752380952380953e-05, |
|
"loss": 0.2915, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 1.2619047619047619, |
|
"grad_norm": 2.209981679916382, |
|
"learning_rate": 5.747619047619048e-05, |
|
"loss": 0.3355, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 1.2857142857142856, |
|
"grad_norm": 1.6532037258148193, |
|
"learning_rate": 5.742857142857143e-05, |
|
"loss": 0.3161, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 1.3095238095238095, |
|
"grad_norm": 1.2978978157043457, |
|
"learning_rate": 5.7380952380952384e-05, |
|
"loss": 0.2843, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 1.3333333333333333, |
|
"grad_norm": 1.4890028238296509, |
|
"learning_rate": 5.7333333333333336e-05, |
|
"loss": 0.2989, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 1.3571428571428572, |
|
"grad_norm": 1.4468064308166504, |
|
"learning_rate": 5.728571428571429e-05, |
|
"loss": 0.2874, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 1.380952380952381, |
|
"grad_norm": 1.2638118267059326, |
|
"learning_rate": 5.7238095238095245e-05, |
|
"loss": 0.2877, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 1.4047619047619047, |
|
"grad_norm": 1.3178492784500122, |
|
"learning_rate": 5.71904761904762e-05, |
|
"loss": 0.2832, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 1.4285714285714286, |
|
"grad_norm": 1.2155193090438843, |
|
"learning_rate": 5.714285714285714e-05, |
|
"loss": 0.2772, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 1.4285714285714286, |
|
"eval_dice_score": 0.8701758660308139, |
|
"eval_loss": 0.2781553268432617, |
|
"eval_runtime": 8.8606, |
|
"eval_samples_per_second": 10.045, |
|
"eval_steps_per_second": 1.016, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 1.4523809523809523, |
|
"grad_norm": 1.8164597749710083, |
|
"learning_rate": 5.709523809523809e-05, |
|
"loss": 0.2814, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 1.4761904761904763, |
|
"grad_norm": 1.2323110103607178, |
|
"learning_rate": 5.7047619047619045e-05, |
|
"loss": 0.2722, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"grad_norm": 1.6265512704849243, |
|
"learning_rate": 5.6999999999999996e-05, |
|
"loss": 0.3031, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 1.5238095238095237, |
|
"grad_norm": 1.2476142644882202, |
|
"learning_rate": 5.6952380952380955e-05, |
|
"loss": 0.274, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 1.5476190476190477, |
|
"grad_norm": 1.3125485181808472, |
|
"learning_rate": 5.6904761904761906e-05, |
|
"loss": 0.2646, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 1.5714285714285714, |
|
"grad_norm": 1.1948935985565186, |
|
"learning_rate": 5.685714285714286e-05, |
|
"loss": 0.2549, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 1.5952380952380953, |
|
"grad_norm": 1.230567216873169, |
|
"learning_rate": 5.680952380952381e-05, |
|
"loss": 0.2757, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 1.619047619047619, |
|
"grad_norm": 1.2908153533935547, |
|
"learning_rate": 5.676190476190476e-05, |
|
"loss": 0.2642, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 1.6428571428571428, |
|
"grad_norm": 1.659945011138916, |
|
"learning_rate": 5.671428571428571e-05, |
|
"loss": 0.3091, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 1.6666666666666665, |
|
"grad_norm": 1.4245527982711792, |
|
"learning_rate": 5.6666666666666664e-05, |
|
"loss": 0.2727, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 1.6904761904761905, |
|
"grad_norm": 1.1477965116500854, |
|
"learning_rate": 5.661904761904762e-05, |
|
"loss": 0.2566, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 1.7142857142857144, |
|
"grad_norm": 1.1955633163452148, |
|
"learning_rate": 5.6571428571428574e-05, |
|
"loss": 0.2478, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 1.7380952380952381, |
|
"grad_norm": 1.092235803604126, |
|
"learning_rate": 5.6523809523809525e-05, |
|
"loss": 0.2653, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 1.7619047619047619, |
|
"grad_norm": 1.1495394706726074, |
|
"learning_rate": 5.647619047619048e-05, |
|
"loss": 0.2476, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 1.7857142857142856, |
|
"grad_norm": 1.333282232284546, |
|
"learning_rate": 5.642857142857143e-05, |
|
"loss": 0.2454, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 1.8095238095238095, |
|
"grad_norm": 1.0368735790252686, |
|
"learning_rate": 5.638095238095238e-05, |
|
"loss": 0.2528, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 1.8333333333333335, |
|
"grad_norm": 1.383416771888733, |
|
"learning_rate": 5.633333333333333e-05, |
|
"loss": 0.266, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 1.8571428571428572, |
|
"grad_norm": 1.0640007257461548, |
|
"learning_rate": 5.628571428571429e-05, |
|
"loss": 0.2319, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 1.880952380952381, |
|
"grad_norm": 1.1098486185073853, |
|
"learning_rate": 5.623809523809524e-05, |
|
"loss": 0.2473, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 1.9047619047619047, |
|
"grad_norm": 0.9411126971244812, |
|
"learning_rate": 5.619047619047619e-05, |
|
"loss": 0.2367, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 1.9047619047619047, |
|
"eval_dice_score": 0.8800905080784573, |
|
"eval_loss": 0.2220260500907898, |
|
"eval_runtime": 8.9078, |
|
"eval_samples_per_second": 9.991, |
|
"eval_steps_per_second": 1.01, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 1.9285714285714286, |
|
"grad_norm": 1.6729662418365479, |
|
"learning_rate": 5.6142857142857145e-05, |
|
"loss": 0.262, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 1.9523809523809523, |
|
"grad_norm": 1.0404990911483765, |
|
"learning_rate": 5.6095238095238096e-05, |
|
"loss": 0.2283, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 1.9761904761904763, |
|
"grad_norm": 1.1858619451522827, |
|
"learning_rate": 5.604761904761905e-05, |
|
"loss": 0.2301, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"grad_norm": 0.9727345108985901, |
|
"learning_rate": 5.6e-05, |
|
"loss": 0.2281, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 2.0238095238095237, |
|
"grad_norm": 1.208103895187378, |
|
"learning_rate": 5.595238095238096e-05, |
|
"loss": 0.2194, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 2.0476190476190474, |
|
"grad_norm": 1.3448512554168701, |
|
"learning_rate": 5.590476190476191e-05, |
|
"loss": 0.2182, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 2.0714285714285716, |
|
"grad_norm": 0.9734125137329102, |
|
"learning_rate": 5.585714285714286e-05, |
|
"loss": 0.2299, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 2.0952380952380953, |
|
"grad_norm": 0.9757384657859802, |
|
"learning_rate": 5.580952380952381e-05, |
|
"loss": 0.225, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 2.119047619047619, |
|
"grad_norm": 1.3605905771255493, |
|
"learning_rate": 5.5761904761904764e-05, |
|
"loss": 0.2237, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 2.142857142857143, |
|
"grad_norm": 2.0481603145599365, |
|
"learning_rate": 5.5714285714285715e-05, |
|
"loss": 0.2233, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 2.1666666666666665, |
|
"grad_norm": 1.8589457273483276, |
|
"learning_rate": 5.5666666666666674e-05, |
|
"loss": 0.2201, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 2.1904761904761907, |
|
"grad_norm": 0.9307376742362976, |
|
"learning_rate": 5.5619047619047625e-05, |
|
"loss": 0.2077, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 2.2142857142857144, |
|
"grad_norm": 0.9198131561279297, |
|
"learning_rate": 5.557142857142858e-05, |
|
"loss": 0.2205, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 2.238095238095238, |
|
"grad_norm": 0.9321783781051636, |
|
"learning_rate": 5.552380952380953e-05, |
|
"loss": 0.2054, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 2.261904761904762, |
|
"grad_norm": 1.1142381429672241, |
|
"learning_rate": 5.547619047619048e-05, |
|
"loss": 0.2356, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 2.2857142857142856, |
|
"grad_norm": 0.9719504714012146, |
|
"learning_rate": 5.542857142857143e-05, |
|
"loss": 0.2095, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 2.3095238095238093, |
|
"grad_norm": 0.9777172207832336, |
|
"learning_rate": 5.538095238095238e-05, |
|
"loss": 0.2025, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 2.3333333333333335, |
|
"grad_norm": 1.0100057125091553, |
|
"learning_rate": 5.533333333333334e-05, |
|
"loss": 0.2022, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 2.357142857142857, |
|
"grad_norm": 0.8255928754806519, |
|
"learning_rate": 5.5285714285714286e-05, |
|
"loss": 0.2161, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 2.380952380952381, |
|
"grad_norm": 0.8491964936256409, |
|
"learning_rate": 5.523809523809524e-05, |
|
"loss": 0.1945, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 2.380952380952381, |
|
"eval_dice_score": 0.8890805782568201, |
|
"eval_loss": 0.1874212622642517, |
|
"eval_runtime": 9.0005, |
|
"eval_samples_per_second": 9.888, |
|
"eval_steps_per_second": 1.0, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 2.4047619047619047, |
|
"grad_norm": 1.0802334547042847, |
|
"learning_rate": 5.519047619047619e-05, |
|
"loss": 0.1996, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 2.4285714285714284, |
|
"grad_norm": 1.2004603147506714, |
|
"learning_rate": 5.514285714285714e-05, |
|
"loss": 0.218, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 2.4523809523809526, |
|
"grad_norm": 1.187143325805664, |
|
"learning_rate": 5.509523809523809e-05, |
|
"loss": 0.2213, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 2.4761904761904763, |
|
"grad_norm": 0.7938065528869629, |
|
"learning_rate": 5.5047619047619044e-05, |
|
"loss": 0.1995, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"grad_norm": 0.8641159534454346, |
|
"learning_rate": 5.5e-05, |
|
"loss": 0.2353, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 2.5238095238095237, |
|
"grad_norm": 0.8983684182167053, |
|
"learning_rate": 5.4952380952380954e-05, |
|
"loss": 0.192, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 2.5476190476190474, |
|
"grad_norm": 0.730816125869751, |
|
"learning_rate": 5.4904761904761905e-05, |
|
"loss": 0.1958, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 2.571428571428571, |
|
"grad_norm": 0.7313885688781738, |
|
"learning_rate": 5.4857142857142857e-05, |
|
"loss": 0.2031, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 2.5952380952380953, |
|
"grad_norm": 0.8542109131813049, |
|
"learning_rate": 5.480952380952381e-05, |
|
"loss": 0.182, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 2.619047619047619, |
|
"grad_norm": 0.8531911373138428, |
|
"learning_rate": 5.476190476190476e-05, |
|
"loss": 0.2021, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 2.642857142857143, |
|
"grad_norm": 0.8808167576789856, |
|
"learning_rate": 5.471428571428571e-05, |
|
"loss": 0.1899, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 2.6666666666666665, |
|
"grad_norm": 1.2466603517532349, |
|
"learning_rate": 5.466666666666667e-05, |
|
"loss": 0.2161, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 2.6904761904761907, |
|
"grad_norm": 0.8664019107818604, |
|
"learning_rate": 5.461904761904762e-05, |
|
"loss": 0.2048, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 2.7142857142857144, |
|
"grad_norm": 1.0919655561447144, |
|
"learning_rate": 5.457142857142857e-05, |
|
"loss": 0.2008, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 2.738095238095238, |
|
"grad_norm": 0.8462991118431091, |
|
"learning_rate": 5.4523809523809524e-05, |
|
"loss": 0.1982, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 2.761904761904762, |
|
"grad_norm": 0.7836592197418213, |
|
"learning_rate": 5.4476190476190476e-05, |
|
"loss": 0.1844, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 2.7857142857142856, |
|
"grad_norm": 1.0157865285873413, |
|
"learning_rate": 5.442857142857143e-05, |
|
"loss": 0.1879, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 2.8095238095238093, |
|
"grad_norm": 0.7947904467582703, |
|
"learning_rate": 5.4380952380952386e-05, |
|
"loss": 0.1813, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 2.8333333333333335, |
|
"grad_norm": 0.8740283846855164, |
|
"learning_rate": 5.433333333333334e-05, |
|
"loss": 0.1743, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 2.857142857142857, |
|
"grad_norm": 0.9798213243484497, |
|
"learning_rate": 5.428571428571429e-05, |
|
"loss": 0.1768, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 2.857142857142857, |
|
"eval_dice_score": 0.8953037523457541, |
|
"eval_loss": 0.1698274165391922, |
|
"eval_runtime": 9.057, |
|
"eval_samples_per_second": 9.827, |
|
"eval_steps_per_second": 0.994, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 2.880952380952381, |
|
"grad_norm": 0.9734324812889099, |
|
"learning_rate": 5.423809523809524e-05, |
|
"loss": 0.2079, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 2.9047619047619047, |
|
"grad_norm": 0.6516209244728088, |
|
"learning_rate": 5.419047619047619e-05, |
|
"loss": 0.1745, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 2.928571428571429, |
|
"grad_norm": 0.7925631999969482, |
|
"learning_rate": 5.414285714285714e-05, |
|
"loss": 0.1802, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 2.9523809523809526, |
|
"grad_norm": 0.9028489589691162, |
|
"learning_rate": 5.4095238095238095e-05, |
|
"loss": 0.1625, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 2.9761904761904763, |
|
"grad_norm": 0.70108562707901, |
|
"learning_rate": 5.404761904761905e-05, |
|
"loss": 0.1718, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"grad_norm": 0.7561673521995544, |
|
"learning_rate": 5.4000000000000005e-05, |
|
"loss": 0.1767, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 3.0238095238095237, |
|
"grad_norm": 0.8195943832397461, |
|
"learning_rate": 5.3952380952380956e-05, |
|
"loss": 0.1848, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 3.0476190476190474, |
|
"grad_norm": 0.7275943756103516, |
|
"learning_rate": 5.390476190476191e-05, |
|
"loss": 0.1706, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 3.0714285714285716, |
|
"grad_norm": 0.9391573071479797, |
|
"learning_rate": 5.385714285714286e-05, |
|
"loss": 0.1782, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 3.0952380952380953, |
|
"grad_norm": 0.8863614201545715, |
|
"learning_rate": 5.380952380952381e-05, |
|
"loss": 0.1732, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 3.119047619047619, |
|
"grad_norm": 0.9662719368934631, |
|
"learning_rate": 5.376190476190476e-05, |
|
"loss": 0.1547, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 3.142857142857143, |
|
"grad_norm": 0.6543298363685608, |
|
"learning_rate": 5.371428571428572e-05, |
|
"loss": 0.1682, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 3.1666666666666665, |
|
"grad_norm": 0.5799964070320129, |
|
"learning_rate": 5.366666666666667e-05, |
|
"loss": 0.1807, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 3.1904761904761907, |
|
"grad_norm": 0.8333367705345154, |
|
"learning_rate": 5.3619047619047624e-05, |
|
"loss": 0.1669, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 3.2142857142857144, |
|
"grad_norm": 0.886374831199646, |
|
"learning_rate": 5.3571428571428575e-05, |
|
"loss": 0.1904, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 3.238095238095238, |
|
"grad_norm": 0.7203497886657715, |
|
"learning_rate": 5.352380952380953e-05, |
|
"loss": 0.1707, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 3.261904761904762, |
|
"grad_norm": 0.7457689046859741, |
|
"learning_rate": 5.347619047619048e-05, |
|
"loss": 0.1644, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 3.2857142857142856, |
|
"grad_norm": 1.0430240631103516, |
|
"learning_rate": 5.342857142857142e-05, |
|
"loss": 0.1887, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 3.3095238095238093, |
|
"grad_norm": 0.764371931552887, |
|
"learning_rate": 5.338095238095238e-05, |
|
"loss": 0.1877, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 3.3333333333333335, |
|
"grad_norm": 0.6030626893043518, |
|
"learning_rate": 5.333333333333333e-05, |
|
"loss": 0.1616, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 3.3333333333333335, |
|
"eval_dice_score": 0.8981800042364196, |
|
"eval_loss": 0.148634672164917, |
|
"eval_runtime": 8.7776, |
|
"eval_samples_per_second": 10.139, |
|
"eval_steps_per_second": 1.025, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 3.357142857142857, |
|
"grad_norm": 0.5721175074577332, |
|
"learning_rate": 5.3285714285714285e-05, |
|
"loss": 0.1484, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 3.380952380952381, |
|
"grad_norm": 0.5327512621879578, |
|
"learning_rate": 5.3238095238095236e-05, |
|
"loss": 0.1787, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 3.4047619047619047, |
|
"grad_norm": 0.6115806698799133, |
|
"learning_rate": 5.319047619047619e-05, |
|
"loss": 0.1689, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 3.4285714285714284, |
|
"grad_norm": 0.6276452541351318, |
|
"learning_rate": 5.314285714285714e-05, |
|
"loss": 0.1657, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 3.4523809523809526, |
|
"grad_norm": 0.8162292838096619, |
|
"learning_rate": 5.30952380952381e-05, |
|
"loss": 0.1554, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 3.4761904761904763, |
|
"grad_norm": 0.6978632211685181, |
|
"learning_rate": 5.304761904761905e-05, |
|
"loss": 0.1632, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 3.5, |
|
"grad_norm": 0.866811990737915, |
|
"learning_rate": 5.3e-05, |
|
"loss": 0.1835, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 3.5238095238095237, |
|
"grad_norm": 0.625978410243988, |
|
"learning_rate": 5.295238095238095e-05, |
|
"loss": 0.1505, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 3.5476190476190474, |
|
"grad_norm": 0.5491347908973694, |
|
"learning_rate": 5.2904761904761904e-05, |
|
"loss": 0.1485, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 3.571428571428571, |
|
"grad_norm": 0.62246173620224, |
|
"learning_rate": 5.2857142857142855e-05, |
|
"loss": 0.1534, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 3.5952380952380953, |
|
"grad_norm": 0.5550210475921631, |
|
"learning_rate": 5.280952380952381e-05, |
|
"loss": 0.1489, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 3.619047619047619, |
|
"grad_norm": 0.6564983129501343, |
|
"learning_rate": 5.2761904761904765e-05, |
|
"loss": 0.1616, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 3.642857142857143, |
|
"grad_norm": 0.7625710368156433, |
|
"learning_rate": 5.271428571428572e-05, |
|
"loss": 0.1382, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 3.6666666666666665, |
|
"grad_norm": 0.6809992790222168, |
|
"learning_rate": 5.266666666666667e-05, |
|
"loss": 0.1593, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 3.6904761904761907, |
|
"grad_norm": 0.5852124094963074, |
|
"learning_rate": 5.261904761904762e-05, |
|
"loss": 0.1675, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 3.7142857142857144, |
|
"grad_norm": 0.5885810256004333, |
|
"learning_rate": 5.257142857142857e-05, |
|
"loss": 0.1443, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 3.738095238095238, |
|
"grad_norm": 0.8580813407897949, |
|
"learning_rate": 5.252380952380952e-05, |
|
"loss": 0.1471, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 3.761904761904762, |
|
"grad_norm": 0.7032930254936218, |
|
"learning_rate": 5.2476190476190475e-05, |
|
"loss": 0.1403, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 3.7857142857142856, |
|
"grad_norm": 0.6581481099128723, |
|
"learning_rate": 5.242857142857143e-05, |
|
"loss": 0.1555, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 3.8095238095238093, |
|
"grad_norm": 0.9175595641136169, |
|
"learning_rate": 5.2380952380952384e-05, |
|
"loss": 0.1749, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 3.8095238095238093, |
|
"eval_dice_score": 0.9037976009755364, |
|
"eval_loss": 0.1529674082994461, |
|
"eval_runtime": 8.7408, |
|
"eval_samples_per_second": 10.182, |
|
"eval_steps_per_second": 1.03, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 3.8333333333333335, |
|
"grad_norm": 0.5896248817443848, |
|
"learning_rate": 5.2333333333333336e-05, |
|
"loss": 0.1709, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 3.857142857142857, |
|
"grad_norm": 0.5807002782821655, |
|
"learning_rate": 5.228571428571429e-05, |
|
"loss": 0.1436, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 3.880952380952381, |
|
"grad_norm": 0.6526125073432922, |
|
"learning_rate": 5.223809523809524e-05, |
|
"loss": 0.1327, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 3.9047619047619047, |
|
"grad_norm": 0.5510716438293457, |
|
"learning_rate": 5.219047619047619e-05, |
|
"loss": 0.1627, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 3.928571428571429, |
|
"grad_norm": 0.5205186009407043, |
|
"learning_rate": 5.214285714285714e-05, |
|
"loss": 0.1322, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 3.9523809523809526, |
|
"grad_norm": 0.5087710618972778, |
|
"learning_rate": 5.20952380952381e-05, |
|
"loss": 0.1546, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 3.9761904761904763, |
|
"grad_norm": 0.5551671981811523, |
|
"learning_rate": 5.204761904761905e-05, |
|
"loss": 0.1584, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"grad_norm": 0.6679753065109253, |
|
"learning_rate": 5.2000000000000004e-05, |
|
"loss": 0.1515, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 4.023809523809524, |
|
"grad_norm": 0.547113299369812, |
|
"learning_rate": 5.1952380952380955e-05, |
|
"loss": 0.1424, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 4.0476190476190474, |
|
"grad_norm": 0.43713295459747314, |
|
"learning_rate": 5.190476190476191e-05, |
|
"loss": 0.1534, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 4.071428571428571, |
|
"grad_norm": 0.5712536573410034, |
|
"learning_rate": 5.185714285714286e-05, |
|
"loss": 0.1674, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 4.095238095238095, |
|
"grad_norm": 0.5892536044120789, |
|
"learning_rate": 5.1809523809523817e-05, |
|
"loss": 0.1374, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 4.119047619047619, |
|
"grad_norm": 0.5541051626205444, |
|
"learning_rate": 5.176190476190477e-05, |
|
"loss": 0.154, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 4.142857142857143, |
|
"grad_norm": 0.6557343602180481, |
|
"learning_rate": 5.171428571428572e-05, |
|
"loss": 0.1421, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 4.166666666666667, |
|
"grad_norm": 0.48764219880104065, |
|
"learning_rate": 5.166666666666667e-05, |
|
"loss": 0.1351, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 4.190476190476191, |
|
"grad_norm": 0.4162357747554779, |
|
"learning_rate": 5.161904761904762e-05, |
|
"loss": 0.1515, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 4.214285714285714, |
|
"grad_norm": 0.5261713862419128, |
|
"learning_rate": 5.1571428571428574e-05, |
|
"loss": 0.1518, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 4.238095238095238, |
|
"grad_norm": 0.5337617993354797, |
|
"learning_rate": 5.152380952380952e-05, |
|
"loss": 0.1217, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 4.261904761904762, |
|
"grad_norm": 0.6286489963531494, |
|
"learning_rate": 5.147619047619048e-05, |
|
"loss": 0.1409, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 4.285714285714286, |
|
"grad_norm": 0.6179653406143188, |
|
"learning_rate": 5.142857142857143e-05, |
|
"loss": 0.1358, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 4.285714285714286, |
|
"eval_dice_score": 0.9069158827058441, |
|
"eval_loss": 0.13152673840522766, |
|
"eval_runtime": 8.7958, |
|
"eval_samples_per_second": 10.118, |
|
"eval_steps_per_second": 1.023, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 4.309523809523809, |
|
"grad_norm": 0.4932554066181183, |
|
"learning_rate": 5.138095238095238e-05, |
|
"loss": 0.1561, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 4.333333333333333, |
|
"grad_norm": 0.5466799139976501, |
|
"learning_rate": 5.133333333333333e-05, |
|
"loss": 0.1548, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 4.357142857142857, |
|
"grad_norm": 0.44180309772491455, |
|
"learning_rate": 5.1285714285714284e-05, |
|
"loss": 0.1482, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 4.380952380952381, |
|
"grad_norm": 0.7849823832511902, |
|
"learning_rate": 5.1238095238095235e-05, |
|
"loss": 0.1794, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 4.404761904761905, |
|
"grad_norm": 0.406508207321167, |
|
"learning_rate": 5.119047619047619e-05, |
|
"loss": 0.1294, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 4.428571428571429, |
|
"grad_norm": 0.8998963236808777, |
|
"learning_rate": 5.1142857142857145e-05, |
|
"loss": 0.1358, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 4.4523809523809526, |
|
"grad_norm": 0.41310542821884155, |
|
"learning_rate": 5.1095238095238097e-05, |
|
"loss": 0.134, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 4.476190476190476, |
|
"grad_norm": 0.41451480984687805, |
|
"learning_rate": 5.104761904761905e-05, |
|
"loss": 0.1419, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 4.5, |
|
"grad_norm": 0.5282092094421387, |
|
"learning_rate": 5.1e-05, |
|
"loss": 0.136, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 4.523809523809524, |
|
"grad_norm": 0.5178981423377991, |
|
"learning_rate": 5.095238095238095e-05, |
|
"loss": 0.1274, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 4.5476190476190474, |
|
"grad_norm": 0.5173990726470947, |
|
"learning_rate": 5.09047619047619e-05, |
|
"loss": 0.1391, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 4.571428571428571, |
|
"grad_norm": 0.479457825422287, |
|
"learning_rate": 5.0857142857142854e-05, |
|
"loss": 0.1332, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 4.595238095238095, |
|
"grad_norm": 0.4143787622451782, |
|
"learning_rate": 5.080952380952381e-05, |
|
"loss": 0.1571, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 4.619047619047619, |
|
"grad_norm": 0.4726862609386444, |
|
"learning_rate": 5.0761904761904764e-05, |
|
"loss": 0.1373, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 4.642857142857143, |
|
"grad_norm": 0.42671865224838257, |
|
"learning_rate": 5.0714285714285716e-05, |
|
"loss": 0.1316, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 4.666666666666667, |
|
"grad_norm": 0.5958961844444275, |
|
"learning_rate": 5.066666666666667e-05, |
|
"loss": 0.1563, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 4.690476190476191, |
|
"grad_norm": 0.4654116928577423, |
|
"learning_rate": 5.061904761904762e-05, |
|
"loss": 0.1426, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 4.714285714285714, |
|
"grad_norm": 0.6422296762466431, |
|
"learning_rate": 5.057142857142857e-05, |
|
"loss": 0.1278, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 4.738095238095238, |
|
"grad_norm": 0.4659450352191925, |
|
"learning_rate": 5.052380952380953e-05, |
|
"loss": 0.12, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 4.761904761904762, |
|
"grad_norm": 0.4322237968444824, |
|
"learning_rate": 5.047619047619048e-05, |
|
"loss": 0.1166, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 4.761904761904762, |
|
"eval_dice_score": 0.9074214584101227, |
|
"eval_loss": 0.12160411477088928, |
|
"eval_runtime": 8.9569, |
|
"eval_samples_per_second": 9.937, |
|
"eval_steps_per_second": 1.005, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 4.785714285714286, |
|
"grad_norm": 0.6910778284072876, |
|
"learning_rate": 5.042857142857143e-05, |
|
"loss": 0.1562, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 4.809523809523809, |
|
"grad_norm": 0.4675683081150055, |
|
"learning_rate": 5.038095238095238e-05, |
|
"loss": 0.1547, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 4.833333333333333, |
|
"grad_norm": 0.4359719753265381, |
|
"learning_rate": 5.0333333333333335e-05, |
|
"loss": 0.1196, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 4.857142857142857, |
|
"grad_norm": 0.4786078929901123, |
|
"learning_rate": 5.0285714285714286e-05, |
|
"loss": 0.1441, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 4.880952380952381, |
|
"grad_norm": 0.5416626334190369, |
|
"learning_rate": 5.023809523809524e-05, |
|
"loss": 0.1219, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 4.904761904761905, |
|
"grad_norm": 0.5497027039527893, |
|
"learning_rate": 5.0190476190476196e-05, |
|
"loss": 0.1221, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 4.928571428571429, |
|
"grad_norm": 0.5406143069267273, |
|
"learning_rate": 5.014285714285715e-05, |
|
"loss": 0.1211, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 4.9523809523809526, |
|
"grad_norm": 0.472383052110672, |
|
"learning_rate": 5.00952380952381e-05, |
|
"loss": 0.1459, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 4.976190476190476, |
|
"grad_norm": 0.4744516611099243, |
|
"learning_rate": 5.004761904761905e-05, |
|
"loss": 0.1488, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"grad_norm": 0.40707042813301086, |
|
"learning_rate": 5e-05, |
|
"loss": 0.1423, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 5.023809523809524, |
|
"grad_norm": 0.3842790424823761, |
|
"learning_rate": 4.9952380952380954e-05, |
|
"loss": 0.1329, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 5.0476190476190474, |
|
"grad_norm": 0.45681342482566833, |
|
"learning_rate": 4.9904761904761905e-05, |
|
"loss": 0.1366, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 5.071428571428571, |
|
"grad_norm": 0.3685499429702759, |
|
"learning_rate": 4.9857142857142864e-05, |
|
"loss": 0.1247, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 5.095238095238095, |
|
"grad_norm": 0.44698742032051086, |
|
"learning_rate": 4.9809523809523815e-05, |
|
"loss": 0.1252, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 5.119047619047619, |
|
"grad_norm": 0.49723654985427856, |
|
"learning_rate": 4.976190476190477e-05, |
|
"loss": 0.1311, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 5.142857142857143, |
|
"grad_norm": 0.6091992855072021, |
|
"learning_rate": 4.971428571428572e-05, |
|
"loss": 0.1711, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 5.166666666666667, |
|
"grad_norm": 0.5066311955451965, |
|
"learning_rate": 4.966666666666666e-05, |
|
"loss": 0.1323, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 5.190476190476191, |
|
"grad_norm": 0.3507075011730194, |
|
"learning_rate": 4.9619047619047615e-05, |
|
"loss": 0.1135, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 5.214285714285714, |
|
"grad_norm": 0.4396760165691376, |
|
"learning_rate": 4.957142857142857e-05, |
|
"loss": 0.1474, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 5.238095238095238, |
|
"grad_norm": 0.39729997515678406, |
|
"learning_rate": 4.9523809523809525e-05, |
|
"loss": 0.1426, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 5.238095238095238, |
|
"eval_dice_score": 0.9099840335920737, |
|
"eval_loss": 0.11943376809358597, |
|
"eval_runtime": 8.7914, |
|
"eval_samples_per_second": 10.124, |
|
"eval_steps_per_second": 1.024, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 5.261904761904762, |
|
"grad_norm": 0.49762511253356934, |
|
"learning_rate": 4.9476190476190476e-05, |
|
"loss": 0.1177, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 5.285714285714286, |
|
"grad_norm": 0.33840247988700867, |
|
"learning_rate": 4.942857142857143e-05, |
|
"loss": 0.1343, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 5.309523809523809, |
|
"grad_norm": 0.3600093722343445, |
|
"learning_rate": 4.938095238095238e-05, |
|
"loss": 0.113, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 5.333333333333333, |
|
"grad_norm": 0.45041218400001526, |
|
"learning_rate": 4.933333333333333e-05, |
|
"loss": 0.1359, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 5.357142857142857, |
|
"grad_norm": 0.3925110697746277, |
|
"learning_rate": 4.928571428571428e-05, |
|
"loss": 0.1125, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 5.380952380952381, |
|
"grad_norm": 0.3413161039352417, |
|
"learning_rate": 4.923809523809524e-05, |
|
"loss": 0.1173, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 5.404761904761905, |
|
"grad_norm": 0.4640910029411316, |
|
"learning_rate": 4.919047619047619e-05, |
|
"loss": 0.1395, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 5.428571428571429, |
|
"grad_norm": 0.44285720586776733, |
|
"learning_rate": 4.9142857142857144e-05, |
|
"loss": 0.1347, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 5.4523809523809526, |
|
"grad_norm": 0.4267982244491577, |
|
"learning_rate": 4.9095238095238095e-05, |
|
"loss": 0.1234, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 5.476190476190476, |
|
"grad_norm": 0.4833822548389435, |
|
"learning_rate": 4.904761904761905e-05, |
|
"loss": 0.1146, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 5.5, |
|
"grad_norm": 0.4235381782054901, |
|
"learning_rate": 4.9e-05, |
|
"loss": 0.127, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 5.523809523809524, |
|
"grad_norm": 0.36395061016082764, |
|
"learning_rate": 4.895238095238095e-05, |
|
"loss": 0.1132, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 5.5476190476190474, |
|
"grad_norm": 0.3754006326198578, |
|
"learning_rate": 4.890476190476191e-05, |
|
"loss": 0.1476, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 5.571428571428571, |
|
"grad_norm": 0.5128763318061829, |
|
"learning_rate": 4.885714285714286e-05, |
|
"loss": 0.1052, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 5.595238095238095, |
|
"grad_norm": 0.381115198135376, |
|
"learning_rate": 4.880952380952381e-05, |
|
"loss": 0.1229, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 5.619047619047619, |
|
"grad_norm": 0.42561641335487366, |
|
"learning_rate": 4.876190476190476e-05, |
|
"loss": 0.1178, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 5.642857142857143, |
|
"grad_norm": 0.4684426486492157, |
|
"learning_rate": 4.8714285714285714e-05, |
|
"loss": 0.1256, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 5.666666666666667, |
|
"grad_norm": 0.3221814036369324, |
|
"learning_rate": 4.8666666666666666e-05, |
|
"loss": 0.1299, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 5.690476190476191, |
|
"grad_norm": 0.3549591898918152, |
|
"learning_rate": 4.861904761904762e-05, |
|
"loss": 0.1103, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 5.714285714285714, |
|
"grad_norm": 0.4589278995990753, |
|
"learning_rate": 4.8571428571428576e-05, |
|
"loss": 0.1199, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 5.714285714285714, |
|
"eval_dice_score": 0.9114496222860314, |
|
"eval_loss": 0.12006842344999313, |
|
"eval_runtime": 8.774, |
|
"eval_samples_per_second": 10.144, |
|
"eval_steps_per_second": 1.026, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 5.738095238095238, |
|
"grad_norm": 0.46148115396499634, |
|
"learning_rate": 4.852380952380953e-05, |
|
"loss": 0.1248, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 5.761904761904762, |
|
"grad_norm": 0.535234808921814, |
|
"learning_rate": 4.847619047619048e-05, |
|
"loss": 0.1367, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 5.785714285714286, |
|
"grad_norm": 0.4368140399456024, |
|
"learning_rate": 4.842857142857143e-05, |
|
"loss": 0.1282, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 5.809523809523809, |
|
"grad_norm": 0.41349226236343384, |
|
"learning_rate": 4.838095238095238e-05, |
|
"loss": 0.1089, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 5.833333333333333, |
|
"grad_norm": 0.3823813199996948, |
|
"learning_rate": 4.8333333333333334e-05, |
|
"loss": 0.1409, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 5.857142857142857, |
|
"grad_norm": 0.3816284239292145, |
|
"learning_rate": 4.828571428571429e-05, |
|
"loss": 0.1056, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 5.880952380952381, |
|
"grad_norm": 0.5315767526626587, |
|
"learning_rate": 4.8238095238095243e-05, |
|
"loss": 0.1201, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 5.904761904761905, |
|
"grad_norm": 0.46191665530204773, |
|
"learning_rate": 4.8190476190476195e-05, |
|
"loss": 0.1306, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 5.928571428571429, |
|
"grad_norm": 0.41716304421424866, |
|
"learning_rate": 4.8142857142857147e-05, |
|
"loss": 0.1361, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 5.9523809523809526, |
|
"grad_norm": 0.5382347106933594, |
|
"learning_rate": 4.80952380952381e-05, |
|
"loss": 0.1499, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 5.976190476190476, |
|
"grad_norm": 0.4075874090194702, |
|
"learning_rate": 4.804761904761905e-05, |
|
"loss": 0.1221, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"grad_norm": 0.30484116077423096, |
|
"learning_rate": 4.8e-05, |
|
"loss": 0.1365, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 6.023809523809524, |
|
"grad_norm": 0.46580827236175537, |
|
"learning_rate": 4.795238095238096e-05, |
|
"loss": 0.1254, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 6.0476190476190474, |
|
"grad_norm": 0.41510361433029175, |
|
"learning_rate": 4.790476190476191e-05, |
|
"loss": 0.1411, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 6.071428571428571, |
|
"grad_norm": 0.37615931034088135, |
|
"learning_rate": 4.785714285714286e-05, |
|
"loss": 0.1101, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 6.095238095238095, |
|
"grad_norm": 0.43453407287597656, |
|
"learning_rate": 4.780952380952381e-05, |
|
"loss": 0.1227, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 6.119047619047619, |
|
"grad_norm": 0.3365417718887329, |
|
"learning_rate": 4.776190476190476e-05, |
|
"loss": 0.1497, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 6.142857142857143, |
|
"grad_norm": 0.3814278244972229, |
|
"learning_rate": 4.771428571428571e-05, |
|
"loss": 0.1164, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 6.166666666666667, |
|
"grad_norm": 0.4330274760723114, |
|
"learning_rate": 4.766666666666666e-05, |
|
"loss": 0.1056, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 6.190476190476191, |
|
"grad_norm": 0.3585191071033478, |
|
"learning_rate": 4.761904761904762e-05, |
|
"loss": 0.142, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 6.190476190476191, |
|
"eval_dice_score": 0.9102984988362752, |
|
"eval_loss": 0.11125586926937103, |
|
"eval_runtime": 8.8105, |
|
"eval_samples_per_second": 10.102, |
|
"eval_steps_per_second": 1.022, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 6.214285714285714, |
|
"grad_norm": 0.38072383403778076, |
|
"learning_rate": 4.757142857142857e-05, |
|
"loss": 0.1254, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 6.238095238095238, |
|
"grad_norm": 0.439165323972702, |
|
"learning_rate": 4.7523809523809523e-05, |
|
"loss": 0.1129, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 6.261904761904762, |
|
"grad_norm": 0.4433733820915222, |
|
"learning_rate": 4.7476190476190475e-05, |
|
"loss": 0.1194, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 6.285714285714286, |
|
"grad_norm": 0.36688342690467834, |
|
"learning_rate": 4.7428571428571427e-05, |
|
"loss": 0.1261, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 6.309523809523809, |
|
"grad_norm": 0.41297096014022827, |
|
"learning_rate": 4.738095238095238e-05, |
|
"loss": 0.1227, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 6.333333333333333, |
|
"grad_norm": 0.4545275866985321, |
|
"learning_rate": 4.733333333333333e-05, |
|
"loss": 0.1067, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 6.357142857142857, |
|
"grad_norm": 0.40353894233703613, |
|
"learning_rate": 4.728571428571429e-05, |
|
"loss": 0.122, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 6.380952380952381, |
|
"grad_norm": 0.36850976943969727, |
|
"learning_rate": 4.723809523809524e-05, |
|
"loss": 0.1036, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 6.404761904761905, |
|
"grad_norm": 0.4579184651374817, |
|
"learning_rate": 4.719047619047619e-05, |
|
"loss": 0.1225, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 6.428571428571429, |
|
"grad_norm": 0.3441743850708008, |
|
"learning_rate": 4.714285714285714e-05, |
|
"loss": 0.1114, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 6.4523809523809526, |
|
"grad_norm": 0.3042502999305725, |
|
"learning_rate": 4.7095238095238094e-05, |
|
"loss": 0.1069, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 6.476190476190476, |
|
"grad_norm": 0.34080275893211365, |
|
"learning_rate": 4.7047619047619046e-05, |
|
"loss": 0.113, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 6.5, |
|
"grad_norm": 0.31439319252967834, |
|
"learning_rate": 4.7000000000000004e-05, |
|
"loss": 0.1119, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 6.523809523809524, |
|
"grad_norm": 0.36084115505218506, |
|
"learning_rate": 4.6952380952380956e-05, |
|
"loss": 0.1064, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 6.5476190476190474, |
|
"grad_norm": 0.38591280579566956, |
|
"learning_rate": 4.690476190476191e-05, |
|
"loss": 0.1303, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 6.571428571428571, |
|
"grad_norm": 0.32427167892456055, |
|
"learning_rate": 4.685714285714286e-05, |
|
"loss": 0.1243, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 6.595238095238095, |
|
"grad_norm": 0.36310875415802, |
|
"learning_rate": 4.680952380952381e-05, |
|
"loss": 0.1252, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 6.619047619047619, |
|
"grad_norm": 0.3615739047527313, |
|
"learning_rate": 4.676190476190476e-05, |
|
"loss": 0.1272, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 6.642857142857143, |
|
"grad_norm": 0.4673764109611511, |
|
"learning_rate": 4.671428571428571e-05, |
|
"loss": 0.1072, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 6.666666666666667, |
|
"grad_norm": 0.43281465768814087, |
|
"learning_rate": 4.666666666666667e-05, |
|
"loss": 0.1104, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 6.666666666666667, |
|
"eval_dice_score": 0.91285228294862, |
|
"eval_loss": 0.11082390695810318, |
|
"eval_runtime": 8.8851, |
|
"eval_samples_per_second": 10.017, |
|
"eval_steps_per_second": 1.013, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 6.690476190476191, |
|
"grad_norm": 0.2869553565979004, |
|
"learning_rate": 4.661904761904762e-05, |
|
"loss": 0.1214, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 6.714285714285714, |
|
"grad_norm": 0.30401700735092163, |
|
"learning_rate": 4.6571428571428575e-05, |
|
"loss": 0.1001, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 6.738095238095238, |
|
"grad_norm": 0.2841688096523285, |
|
"learning_rate": 4.6523809523809526e-05, |
|
"loss": 0.1104, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 6.761904761904762, |
|
"grad_norm": 0.3578402101993561, |
|
"learning_rate": 4.647619047619048e-05, |
|
"loss": 0.1234, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 6.785714285714286, |
|
"grad_norm": 0.3647173047065735, |
|
"learning_rate": 4.642857142857143e-05, |
|
"loss": 0.1239, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 6.809523809523809, |
|
"grad_norm": 0.34969446063041687, |
|
"learning_rate": 4.638095238095238e-05, |
|
"loss": 0.1133, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 6.833333333333333, |
|
"grad_norm": 0.48136159777641296, |
|
"learning_rate": 4.633333333333334e-05, |
|
"loss": 0.1228, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 6.857142857142857, |
|
"grad_norm": 0.4168005585670471, |
|
"learning_rate": 4.628571428571429e-05, |
|
"loss": 0.1239, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 6.880952380952381, |
|
"grad_norm": 0.37365084886550903, |
|
"learning_rate": 4.623809523809524e-05, |
|
"loss": 0.1417, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 6.904761904761905, |
|
"grad_norm": 0.3516845703125, |
|
"learning_rate": 4.6190476190476194e-05, |
|
"loss": 0.1439, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 6.928571428571429, |
|
"grad_norm": 0.3463188111782074, |
|
"learning_rate": 4.6142857142857145e-05, |
|
"loss": 0.1258, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 6.9523809523809526, |
|
"grad_norm": 0.3746688961982727, |
|
"learning_rate": 4.60952380952381e-05, |
|
"loss": 0.1208, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 6.976190476190476, |
|
"grad_norm": 0.4245692491531372, |
|
"learning_rate": 4.604761904761905e-05, |
|
"loss": 0.1017, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"grad_norm": 0.46867018938064575, |
|
"learning_rate": 4.600000000000001e-05, |
|
"loss": 0.0981, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 7.023809523809524, |
|
"grad_norm": 0.308337539434433, |
|
"learning_rate": 4.595238095238096e-05, |
|
"loss": 0.1375, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 7.0476190476190474, |
|
"grad_norm": 0.4807004928588867, |
|
"learning_rate": 4.59047619047619e-05, |
|
"loss": 0.1427, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 7.071428571428571, |
|
"grad_norm": 0.5126243233680725, |
|
"learning_rate": 4.5857142857142855e-05, |
|
"loss": 0.0971, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 7.095238095238095, |
|
"grad_norm": 0.37334510684013367, |
|
"learning_rate": 4.5809523809523806e-05, |
|
"loss": 0.1248, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 7.119047619047619, |
|
"grad_norm": 0.41911178827285767, |
|
"learning_rate": 4.576190476190476e-05, |
|
"loss": 0.1111, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 7.142857142857143, |
|
"grad_norm": 0.3633350133895874, |
|
"learning_rate": 4.5714285714285716e-05, |
|
"loss": 0.0931, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 7.142857142857143, |
|
"eval_dice_score": 0.9114141838731112, |
|
"eval_loss": 0.10547726601362228, |
|
"eval_runtime": 8.8954, |
|
"eval_samples_per_second": 10.005, |
|
"eval_steps_per_second": 1.012, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 7.166666666666667, |
|
"grad_norm": 0.3622587323188782, |
|
"learning_rate": 4.566666666666667e-05, |
|
"loss": 0.0989, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 7.190476190476191, |
|
"grad_norm": 0.53045254945755, |
|
"learning_rate": 4.561904761904762e-05, |
|
"loss": 0.1146, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 7.214285714285714, |
|
"grad_norm": 0.2951499819755554, |
|
"learning_rate": 4.557142857142857e-05, |
|
"loss": 0.12, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 7.238095238095238, |
|
"grad_norm": 0.29450082778930664, |
|
"learning_rate": 4.552380952380952e-05, |
|
"loss": 0.0994, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 7.261904761904762, |
|
"grad_norm": 0.35549187660217285, |
|
"learning_rate": 4.5476190476190474e-05, |
|
"loss": 0.1136, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 7.285714285714286, |
|
"grad_norm": 0.4132053256034851, |
|
"learning_rate": 4.5428571428571425e-05, |
|
"loss": 0.1176, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 7.309523809523809, |
|
"grad_norm": 0.540023922920227, |
|
"learning_rate": 4.5380952380952384e-05, |
|
"loss": 0.1395, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 7.333333333333333, |
|
"grad_norm": 0.3276040852069855, |
|
"learning_rate": 4.5333333333333335e-05, |
|
"loss": 0.1218, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 7.357142857142857, |
|
"grad_norm": 0.2875545024871826, |
|
"learning_rate": 4.528571428571429e-05, |
|
"loss": 0.1031, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 7.380952380952381, |
|
"grad_norm": 0.3251423239707947, |
|
"learning_rate": 4.523809523809524e-05, |
|
"loss": 0.1086, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 7.404761904761905, |
|
"grad_norm": 0.3128550946712494, |
|
"learning_rate": 4.519047619047619e-05, |
|
"loss": 0.1142, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 7.428571428571429, |
|
"grad_norm": 0.29231375455856323, |
|
"learning_rate": 4.514285714285714e-05, |
|
"loss": 0.1177, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 7.4523809523809526, |
|
"grad_norm": 0.27210715413093567, |
|
"learning_rate": 4.509523809523809e-05, |
|
"loss": 0.0969, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 7.476190476190476, |
|
"grad_norm": 0.35545647144317627, |
|
"learning_rate": 4.504761904761905e-05, |
|
"loss": 0.1007, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 7.5, |
|
"grad_norm": 0.37293487787246704, |
|
"learning_rate": 4.5e-05, |
|
"loss": 0.113, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 7.523809523809524, |
|
"grad_norm": 0.45221376419067383, |
|
"learning_rate": 4.4952380952380954e-05, |
|
"loss": 0.1136, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 7.5476190476190474, |
|
"grad_norm": 0.4034024775028229, |
|
"learning_rate": 4.4904761904761906e-05, |
|
"loss": 0.1063, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 7.571428571428571, |
|
"grad_norm": 0.35284796357154846, |
|
"learning_rate": 4.485714285714286e-05, |
|
"loss": 0.1058, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 7.595238095238095, |
|
"grad_norm": 0.31462305784225464, |
|
"learning_rate": 4.480952380952381e-05, |
|
"loss": 0.097, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 7.619047619047619, |
|
"grad_norm": 0.35902947187423706, |
|
"learning_rate": 4.476190476190476e-05, |
|
"loss": 0.0987, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 7.619047619047619, |
|
"eval_dice_score": 0.9146509876386677, |
|
"eval_loss": 0.10323517769575119, |
|
"eval_runtime": 8.7797, |
|
"eval_samples_per_second": 10.137, |
|
"eval_steps_per_second": 1.025, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 7.642857142857143, |
|
"grad_norm": 0.4236834943294525, |
|
"learning_rate": 4.471428571428572e-05, |
|
"loss": 0.1157, |
|
"step": 321 |
|
}, |
|
{ |
|
"epoch": 7.666666666666667, |
|
"grad_norm": 0.3109116554260254, |
|
"learning_rate": 4.466666666666667e-05, |
|
"loss": 0.11, |
|
"step": 322 |
|
}, |
|
{ |
|
"epoch": 7.690476190476191, |
|
"grad_norm": 0.3016926944255829, |
|
"learning_rate": 4.461904761904762e-05, |
|
"loss": 0.123, |
|
"step": 323 |
|
}, |
|
{ |
|
"epoch": 7.714285714285714, |
|
"grad_norm": 0.27012699842453003, |
|
"learning_rate": 4.4571428571428574e-05, |
|
"loss": 0.1271, |
|
"step": 324 |
|
}, |
|
{ |
|
"epoch": 7.738095238095238, |
|
"grad_norm": 0.38100895285606384, |
|
"learning_rate": 4.4523809523809525e-05, |
|
"loss": 0.1305, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 7.761904761904762, |
|
"grad_norm": 0.3512851595878601, |
|
"learning_rate": 4.447619047619048e-05, |
|
"loss": 0.1106, |
|
"step": 326 |
|
}, |
|
{ |
|
"epoch": 7.785714285714286, |
|
"grad_norm": 0.5073042511940002, |
|
"learning_rate": 4.4428571428571435e-05, |
|
"loss": 0.117, |
|
"step": 327 |
|
}, |
|
{ |
|
"epoch": 7.809523809523809, |
|
"grad_norm": 0.2925731837749481, |
|
"learning_rate": 4.4380952380952386e-05, |
|
"loss": 0.0982, |
|
"step": 328 |
|
}, |
|
{ |
|
"epoch": 7.833333333333333, |
|
"grad_norm": 0.2976164221763611, |
|
"learning_rate": 4.433333333333334e-05, |
|
"loss": 0.1039, |
|
"step": 329 |
|
}, |
|
{ |
|
"epoch": 7.857142857142857, |
|
"grad_norm": 0.35800570249557495, |
|
"learning_rate": 4.428571428571429e-05, |
|
"loss": 0.1358, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 7.880952380952381, |
|
"grad_norm": 0.35599473118782043, |
|
"learning_rate": 4.423809523809524e-05, |
|
"loss": 0.1201, |
|
"step": 331 |
|
}, |
|
{ |
|
"epoch": 7.904761904761905, |
|
"grad_norm": 0.35861942172050476, |
|
"learning_rate": 4.419047619047619e-05, |
|
"loss": 0.1029, |
|
"step": 332 |
|
}, |
|
{ |
|
"epoch": 7.928571428571429, |
|
"grad_norm": 0.29498741030693054, |
|
"learning_rate": 4.4142857142857144e-05, |
|
"loss": 0.0926, |
|
"step": 333 |
|
}, |
|
{ |
|
"epoch": 7.9523809523809526, |
|
"grad_norm": 0.37311461567878723, |
|
"learning_rate": 4.40952380952381e-05, |
|
"loss": 0.1318, |
|
"step": 334 |
|
}, |
|
{ |
|
"epoch": 7.976190476190476, |
|
"grad_norm": 0.3335290551185608, |
|
"learning_rate": 4.404761904761905e-05, |
|
"loss": 0.1129, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"grad_norm": 0.3926750123500824, |
|
"learning_rate": 4.4e-05, |
|
"loss": 0.0913, |
|
"step": 336 |
|
}, |
|
{ |
|
"epoch": 8.023809523809524, |
|
"grad_norm": 0.2891796827316284, |
|
"learning_rate": 4.395238095238095e-05, |
|
"loss": 0.1046, |
|
"step": 337 |
|
}, |
|
{ |
|
"epoch": 8.047619047619047, |
|
"grad_norm": 0.3221498429775238, |
|
"learning_rate": 4.39047619047619e-05, |
|
"loss": 0.111, |
|
"step": 338 |
|
}, |
|
{ |
|
"epoch": 8.071428571428571, |
|
"grad_norm": 0.3588779866695404, |
|
"learning_rate": 4.3857142857142853e-05, |
|
"loss": 0.1027, |
|
"step": 339 |
|
}, |
|
{ |
|
"epoch": 8.095238095238095, |
|
"grad_norm": 0.3896290361881256, |
|
"learning_rate": 4.3809523809523805e-05, |
|
"loss": 0.1011, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 8.095238095238095, |
|
"eval_dice_score": 0.9160615055344299, |
|
"eval_loss": 0.10283860564231873, |
|
"eval_runtime": 9.022, |
|
"eval_samples_per_second": 9.865, |
|
"eval_steps_per_second": 0.998, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 8.119047619047619, |
|
"grad_norm": 0.2446659505367279, |
|
"learning_rate": 4.376190476190476e-05, |
|
"loss": 0.0999, |
|
"step": 341 |
|
}, |
|
{ |
|
"epoch": 8.142857142857142, |
|
"grad_norm": 0.27880406379699707, |
|
"learning_rate": 4.3714285714285715e-05, |
|
"loss": 0.0976, |
|
"step": 342 |
|
}, |
|
{ |
|
"epoch": 8.166666666666666, |
|
"grad_norm": 0.3263701796531677, |
|
"learning_rate": 4.3666666666666666e-05, |
|
"loss": 0.1059, |
|
"step": 343 |
|
}, |
|
{ |
|
"epoch": 8.19047619047619, |
|
"grad_norm": 0.31584370136260986, |
|
"learning_rate": 4.361904761904762e-05, |
|
"loss": 0.1334, |
|
"step": 344 |
|
}, |
|
{ |
|
"epoch": 8.214285714285714, |
|
"grad_norm": 0.37839260697364807, |
|
"learning_rate": 4.357142857142857e-05, |
|
"loss": 0.1344, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 8.238095238095237, |
|
"grad_norm": 0.33257240056991577, |
|
"learning_rate": 4.352380952380952e-05, |
|
"loss": 0.094, |
|
"step": 346 |
|
}, |
|
{ |
|
"epoch": 8.261904761904763, |
|
"grad_norm": 0.28752750158309937, |
|
"learning_rate": 4.347619047619048e-05, |
|
"loss": 0.1036, |
|
"step": 347 |
|
}, |
|
{ |
|
"epoch": 8.285714285714286, |
|
"grad_norm": 0.34251728653907776, |
|
"learning_rate": 4.342857142857143e-05, |
|
"loss": 0.1096, |
|
"step": 348 |
|
}, |
|
{ |
|
"epoch": 8.30952380952381, |
|
"grad_norm": 0.3254129886627197, |
|
"learning_rate": 4.338095238095238e-05, |
|
"loss": 0.0908, |
|
"step": 349 |
|
}, |
|
{ |
|
"epoch": 8.333333333333334, |
|
"grad_norm": 0.3975524306297302, |
|
"learning_rate": 4.3333333333333334e-05, |
|
"loss": 0.1199, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 8.357142857142858, |
|
"grad_norm": 0.3707966208457947, |
|
"learning_rate": 4.3285714285714286e-05, |
|
"loss": 0.101, |
|
"step": 351 |
|
}, |
|
{ |
|
"epoch": 8.380952380952381, |
|
"grad_norm": 0.29389944672584534, |
|
"learning_rate": 4.323809523809524e-05, |
|
"loss": 0.1054, |
|
"step": 352 |
|
}, |
|
{ |
|
"epoch": 8.404761904761905, |
|
"grad_norm": 0.4392881691455841, |
|
"learning_rate": 4.319047619047619e-05, |
|
"loss": 0.1212, |
|
"step": 353 |
|
}, |
|
{ |
|
"epoch": 8.428571428571429, |
|
"grad_norm": 0.34391042590141296, |
|
"learning_rate": 4.314285714285715e-05, |
|
"loss": 0.0878, |
|
"step": 354 |
|
}, |
|
{ |
|
"epoch": 8.452380952380953, |
|
"grad_norm": 0.2981056272983551, |
|
"learning_rate": 4.30952380952381e-05, |
|
"loss": 0.1015, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 8.476190476190476, |
|
"grad_norm": 0.589784562587738, |
|
"learning_rate": 4.304761904761905e-05, |
|
"loss": 0.1483, |
|
"step": 356 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"grad_norm": 0.34352627396583557, |
|
"learning_rate": 4.3e-05, |
|
"loss": 0.0927, |
|
"step": 357 |
|
}, |
|
{ |
|
"epoch": 8.523809523809524, |
|
"grad_norm": 0.3901398181915283, |
|
"learning_rate": 4.295238095238095e-05, |
|
"loss": 0.1039, |
|
"step": 358 |
|
}, |
|
{ |
|
"epoch": 8.547619047619047, |
|
"grad_norm": 0.48700276017189026, |
|
"learning_rate": 4.2904761904761905e-05, |
|
"loss": 0.1376, |
|
"step": 359 |
|
}, |
|
{ |
|
"epoch": 8.571428571428571, |
|
"grad_norm": 0.40282323956489563, |
|
"learning_rate": 4.2857142857142856e-05, |
|
"loss": 0.118, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 8.571428571428571, |
|
"eval_dice_score": 0.9140197492850877, |
|
"eval_loss": 0.09799595177173615, |
|
"eval_runtime": 8.8085, |
|
"eval_samples_per_second": 10.104, |
|
"eval_steps_per_second": 1.022, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 8.595238095238095, |
|
"grad_norm": 0.28458479046821594, |
|
"learning_rate": 4.2809523809523815e-05, |
|
"loss": 0.0886, |
|
"step": 361 |
|
}, |
|
{ |
|
"epoch": 8.619047619047619, |
|
"grad_norm": 0.36778727173805237, |
|
"learning_rate": 4.2761904761904766e-05, |
|
"loss": 0.0932, |
|
"step": 362 |
|
}, |
|
{ |
|
"epoch": 8.642857142857142, |
|
"grad_norm": 0.3417188227176666, |
|
"learning_rate": 4.271428571428572e-05, |
|
"loss": 0.1171, |
|
"step": 363 |
|
}, |
|
{ |
|
"epoch": 8.666666666666666, |
|
"grad_norm": 0.32228532433509827, |
|
"learning_rate": 4.266666666666667e-05, |
|
"loss": 0.108, |
|
"step": 364 |
|
}, |
|
{ |
|
"epoch": 8.69047619047619, |
|
"grad_norm": 0.2850722372531891, |
|
"learning_rate": 4.261904761904762e-05, |
|
"loss": 0.1081, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 8.714285714285714, |
|
"grad_norm": 0.42572009563446045, |
|
"learning_rate": 4.257142857142857e-05, |
|
"loss": 0.1087, |
|
"step": 366 |
|
}, |
|
{ |
|
"epoch": 8.738095238095237, |
|
"grad_norm": 0.42448675632476807, |
|
"learning_rate": 4.2523809523809524e-05, |
|
"loss": 0.0925, |
|
"step": 367 |
|
}, |
|
{ |
|
"epoch": 8.761904761904763, |
|
"grad_norm": 0.3276635706424713, |
|
"learning_rate": 4.247619047619048e-05, |
|
"loss": 0.1016, |
|
"step": 368 |
|
}, |
|
{ |
|
"epoch": 8.785714285714286, |
|
"grad_norm": 0.3028334081172943, |
|
"learning_rate": 4.2428571428571434e-05, |
|
"loss": 0.1055, |
|
"step": 369 |
|
}, |
|
{ |
|
"epoch": 8.80952380952381, |
|
"grad_norm": 0.2703326642513275, |
|
"learning_rate": 4.2380952380952385e-05, |
|
"loss": 0.0981, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 8.833333333333334, |
|
"grad_norm": 0.41063007712364197, |
|
"learning_rate": 4.233333333333334e-05, |
|
"loss": 0.119, |
|
"step": 371 |
|
}, |
|
{ |
|
"epoch": 8.857142857142858, |
|
"grad_norm": 0.4103991985321045, |
|
"learning_rate": 4.228571428571429e-05, |
|
"loss": 0.1243, |
|
"step": 372 |
|
}, |
|
{ |
|
"epoch": 8.880952380952381, |
|
"grad_norm": 0.4108157753944397, |
|
"learning_rate": 4.223809523809524e-05, |
|
"loss": 0.1145, |
|
"step": 373 |
|
}, |
|
{ |
|
"epoch": 8.904761904761905, |
|
"grad_norm": 0.29548731446266174, |
|
"learning_rate": 4.219047619047619e-05, |
|
"loss": 0.095, |
|
"step": 374 |
|
}, |
|
{ |
|
"epoch": 8.928571428571429, |
|
"grad_norm": 0.35359784960746765, |
|
"learning_rate": 4.214285714285714e-05, |
|
"loss": 0.104, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 8.952380952380953, |
|
"grad_norm": 0.8022054433822632, |
|
"learning_rate": 4.2095238095238095e-05, |
|
"loss": 0.1198, |
|
"step": 376 |
|
}, |
|
{ |
|
"epoch": 8.976190476190476, |
|
"grad_norm": 0.3940359354019165, |
|
"learning_rate": 4.2047619047619046e-05, |
|
"loss": 0.1185, |
|
"step": 377 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"grad_norm": 0.3221697509288788, |
|
"learning_rate": 4.2e-05, |
|
"loss": 0.0962, |
|
"step": 378 |
|
}, |
|
{ |
|
"epoch": 9.023809523809524, |
|
"grad_norm": 0.4048317074775696, |
|
"learning_rate": 4.195238095238095e-05, |
|
"loss": 0.1212, |
|
"step": 379 |
|
}, |
|
{ |
|
"epoch": 9.047619047619047, |
|
"grad_norm": 0.2531253397464752, |
|
"learning_rate": 4.19047619047619e-05, |
|
"loss": 0.0929, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 9.047619047619047, |
|
"eval_dice_score": 0.9176058599475401, |
|
"eval_loss": 0.09571261703968048, |
|
"eval_runtime": 8.8625, |
|
"eval_samples_per_second": 10.042, |
|
"eval_steps_per_second": 1.016, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 9.071428571428571, |
|
"grad_norm": 0.27786773443222046, |
|
"learning_rate": 4.185714285714286e-05, |
|
"loss": 0.1012, |
|
"step": 381 |
|
}, |
|
{ |
|
"epoch": 9.095238095238095, |
|
"grad_norm": 0.3440767526626587, |
|
"learning_rate": 4.180952380952381e-05, |
|
"loss": 0.0946, |
|
"step": 382 |
|
}, |
|
{ |
|
"epoch": 9.119047619047619, |
|
"grad_norm": 0.2682666778564453, |
|
"learning_rate": 4.176190476190476e-05, |
|
"loss": 0.0949, |
|
"step": 383 |
|
}, |
|
{ |
|
"epoch": 9.142857142857142, |
|
"grad_norm": 0.4733286499977112, |
|
"learning_rate": 4.1714285714285714e-05, |
|
"loss": 0.1239, |
|
"step": 384 |
|
}, |
|
{ |
|
"epoch": 9.166666666666666, |
|
"grad_norm": 0.24958311021327972, |
|
"learning_rate": 4.1666666666666665e-05, |
|
"loss": 0.0937, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 9.19047619047619, |
|
"grad_norm": 0.23216277360916138, |
|
"learning_rate": 4.161904761904762e-05, |
|
"loss": 0.0918, |
|
"step": 386 |
|
}, |
|
{ |
|
"epoch": 9.214285714285714, |
|
"grad_norm": 0.3856890797615051, |
|
"learning_rate": 4.157142857142857e-05, |
|
"loss": 0.0795, |
|
"step": 387 |
|
}, |
|
{ |
|
"epoch": 9.238095238095237, |
|
"grad_norm": 0.29770028591156006, |
|
"learning_rate": 4.152380952380953e-05, |
|
"loss": 0.1268, |
|
"step": 388 |
|
}, |
|
{ |
|
"epoch": 9.261904761904763, |
|
"grad_norm": 0.26541242003440857, |
|
"learning_rate": 4.147619047619048e-05, |
|
"loss": 0.1055, |
|
"step": 389 |
|
}, |
|
{ |
|
"epoch": 9.285714285714286, |
|
"grad_norm": 0.3441533148288727, |
|
"learning_rate": 4.142857142857143e-05, |
|
"loss": 0.1188, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 9.30952380952381, |
|
"grad_norm": 0.4041042625904083, |
|
"learning_rate": 4.138095238095238e-05, |
|
"loss": 0.1069, |
|
"step": 391 |
|
}, |
|
{ |
|
"epoch": 9.333333333333334, |
|
"grad_norm": 0.25647029280662537, |
|
"learning_rate": 4.133333333333333e-05, |
|
"loss": 0.0989, |
|
"step": 392 |
|
}, |
|
{ |
|
"epoch": 9.357142857142858, |
|
"grad_norm": 0.2511276602745056, |
|
"learning_rate": 4.1285714285714284e-05, |
|
"loss": 0.0903, |
|
"step": 393 |
|
}, |
|
{ |
|
"epoch": 9.380952380952381, |
|
"grad_norm": 0.26701053977012634, |
|
"learning_rate": 4.1238095238095236e-05, |
|
"loss": 0.0932, |
|
"step": 394 |
|
}, |
|
{ |
|
"epoch": 9.404761904761905, |
|
"grad_norm": 0.3096204102039337, |
|
"learning_rate": 4.1190476190476194e-05, |
|
"loss": 0.0974, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 9.428571428571429, |
|
"grad_norm": 0.5277116894721985, |
|
"learning_rate": 4.1142857142857146e-05, |
|
"loss": 0.1466, |
|
"step": 396 |
|
}, |
|
{ |
|
"epoch": 9.452380952380953, |
|
"grad_norm": 0.4356262981891632, |
|
"learning_rate": 4.10952380952381e-05, |
|
"loss": 0.1091, |
|
"step": 397 |
|
}, |
|
{ |
|
"epoch": 9.476190476190476, |
|
"grad_norm": 0.28454017639160156, |
|
"learning_rate": 4.104761904761905e-05, |
|
"loss": 0.0975, |
|
"step": 398 |
|
}, |
|
{ |
|
"epoch": 9.5, |
|
"grad_norm": 0.2634621262550354, |
|
"learning_rate": 4.1e-05, |
|
"loss": 0.1062, |
|
"step": 399 |
|
}, |
|
{ |
|
"epoch": 9.523809523809524, |
|
"grad_norm": 0.26061224937438965, |
|
"learning_rate": 4.095238095238095e-05, |
|
"loss": 0.1092, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 9.523809523809524, |
|
"eval_dice_score": 0.9180726876748045, |
|
"eval_loss": 0.09548372775316238, |
|
"eval_runtime": 8.7658, |
|
"eval_samples_per_second": 10.153, |
|
"eval_steps_per_second": 1.027, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 9.547619047619047, |
|
"grad_norm": 0.24562084674835205, |
|
"learning_rate": 4.090476190476191e-05, |
|
"loss": 0.1044, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 9.571428571428571, |
|
"grad_norm": 0.3777954876422882, |
|
"learning_rate": 4.085714285714286e-05, |
|
"loss": 0.1181, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 9.595238095238095, |
|
"grad_norm": 0.3255544602870941, |
|
"learning_rate": 4.0809523809523813e-05, |
|
"loss": 0.1, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 9.619047619047619, |
|
"grad_norm": 0.29565364122390747, |
|
"learning_rate": 4.0761904761904765e-05, |
|
"loss": 0.1024, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 9.642857142857142, |
|
"grad_norm": 0.27408862113952637, |
|
"learning_rate": 4.0714285714285717e-05, |
|
"loss": 0.0998, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 9.666666666666666, |
|
"grad_norm": 0.28693994879722595, |
|
"learning_rate": 4.066666666666667e-05, |
|
"loss": 0.0887, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 9.69047619047619, |
|
"grad_norm": 0.2059289962053299, |
|
"learning_rate": 4.061904761904762e-05, |
|
"loss": 0.1142, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 9.714285714285714, |
|
"grad_norm": 0.2377048283815384, |
|
"learning_rate": 4.057142857142858e-05, |
|
"loss": 0.0896, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 9.738095238095237, |
|
"grad_norm": 0.5583423376083374, |
|
"learning_rate": 4.052380952380953e-05, |
|
"loss": 0.1117, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 9.761904761904763, |
|
"grad_norm": 0.2231013923883438, |
|
"learning_rate": 4.047619047619048e-05, |
|
"loss": 0.0923, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 9.785714285714286, |
|
"grad_norm": 0.37593528628349304, |
|
"learning_rate": 4.042857142857143e-05, |
|
"loss": 0.1224, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 9.80952380952381, |
|
"grad_norm": 0.2936907410621643, |
|
"learning_rate": 4.0380952380952384e-05, |
|
"loss": 0.0954, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 9.833333333333334, |
|
"grad_norm": 0.2800084054470062, |
|
"learning_rate": 4.0333333333333336e-05, |
|
"loss": 0.1021, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 9.857142857142858, |
|
"grad_norm": 0.2564726173877716, |
|
"learning_rate": 4.028571428571428e-05, |
|
"loss": 0.1039, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 9.880952380952381, |
|
"grad_norm": 0.3026561439037323, |
|
"learning_rate": 4.023809523809524e-05, |
|
"loss": 0.0859, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 9.904761904761905, |
|
"grad_norm": 0.3142635226249695, |
|
"learning_rate": 4.019047619047619e-05, |
|
"loss": 0.0934, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 9.928571428571429, |
|
"grad_norm": 0.3444440960884094, |
|
"learning_rate": 4.014285714285714e-05, |
|
"loss": 0.0981, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 9.952380952380953, |
|
"grad_norm": 0.3670935332775116, |
|
"learning_rate": 4.0095238095238093e-05, |
|
"loss": 0.1012, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 9.976190476190476, |
|
"grad_norm": 0.508635938167572, |
|
"learning_rate": 4.0047619047619045e-05, |
|
"loss": 0.096, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"grad_norm": 0.5292103886604309, |
|
"learning_rate": 3.9999999999999996e-05, |
|
"loss": 0.1119, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_dice_score": 0.918242837228066, |
|
"eval_loss": 0.09436747431755066, |
|
"eval_runtime": 8.8686, |
|
"eval_samples_per_second": 10.035, |
|
"eval_steps_per_second": 1.015, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 10.023809523809524, |
|
"grad_norm": 0.24404850602149963, |
|
"learning_rate": 3.995238095238095e-05, |
|
"loss": 0.0966, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 10.047619047619047, |
|
"grad_norm": 0.21881720423698425, |
|
"learning_rate": 3.9904761904761906e-05, |
|
"loss": 0.0868, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 10.071428571428571, |
|
"grad_norm": 0.2544311583042145, |
|
"learning_rate": 3.985714285714286e-05, |
|
"loss": 0.0981, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 10.095238095238095, |
|
"grad_norm": 0.3842509686946869, |
|
"learning_rate": 3.980952380952381e-05, |
|
"loss": 0.102, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 10.119047619047619, |
|
"grad_norm": 0.2687145471572876, |
|
"learning_rate": 3.976190476190476e-05, |
|
"loss": 0.0896, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 10.142857142857142, |
|
"grad_norm": 0.23764175176620483, |
|
"learning_rate": 3.971428571428571e-05, |
|
"loss": 0.1104, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 10.166666666666666, |
|
"grad_norm": 0.36458808183670044, |
|
"learning_rate": 3.9666666666666664e-05, |
|
"loss": 0.1026, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 10.19047619047619, |
|
"grad_norm": 0.29205796122550964, |
|
"learning_rate": 3.961904761904762e-05, |
|
"loss": 0.0931, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 10.214285714285714, |
|
"grad_norm": 0.30714672803878784, |
|
"learning_rate": 3.9571428571428574e-05, |
|
"loss": 0.0899, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 10.238095238095237, |
|
"grad_norm": 0.4927786588668823, |
|
"learning_rate": 3.9523809523809526e-05, |
|
"loss": 0.1027, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 10.261904761904763, |
|
"grad_norm": 0.2786138951778412, |
|
"learning_rate": 3.947619047619048e-05, |
|
"loss": 0.1026, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 10.285714285714286, |
|
"grad_norm": 0.318553626537323, |
|
"learning_rate": 3.942857142857143e-05, |
|
"loss": 0.1101, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 10.30952380952381, |
|
"grad_norm": 0.3124142289161682, |
|
"learning_rate": 3.938095238095238e-05, |
|
"loss": 0.1258, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 10.333333333333334, |
|
"grad_norm": 0.3818247318267822, |
|
"learning_rate": 3.933333333333333e-05, |
|
"loss": 0.0995, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 10.357142857142858, |
|
"grad_norm": 0.281456857919693, |
|
"learning_rate": 3.928571428571429e-05, |
|
"loss": 0.0874, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 10.380952380952381, |
|
"grad_norm": 0.3376539349555969, |
|
"learning_rate": 3.923809523809524e-05, |
|
"loss": 0.0799, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 10.404761904761905, |
|
"grad_norm": 0.2396553009748459, |
|
"learning_rate": 3.919047619047619e-05, |
|
"loss": 0.1047, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 10.428571428571429, |
|
"grad_norm": 0.2931698262691498, |
|
"learning_rate": 3.9142857142857145e-05, |
|
"loss": 0.0972, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 10.452380952380953, |
|
"grad_norm": 0.30064502358436584, |
|
"learning_rate": 3.9095238095238096e-05, |
|
"loss": 0.0975, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 10.476190476190476, |
|
"grad_norm": 0.4932076036930084, |
|
"learning_rate": 3.904761904761905e-05, |
|
"loss": 0.095, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 10.476190476190476, |
|
"eval_dice_score": 0.9177181678377411, |
|
"eval_loss": 0.0928926020860672, |
|
"eval_runtime": 8.9369, |
|
"eval_samples_per_second": 9.959, |
|
"eval_steps_per_second": 1.007, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 10.5, |
|
"grad_norm": 0.26641082763671875, |
|
"learning_rate": 3.9e-05, |
|
"loss": 0.0955, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 10.523809523809524, |
|
"grad_norm": 0.32811999320983887, |
|
"learning_rate": 3.895238095238096e-05, |
|
"loss": 0.1066, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 10.547619047619047, |
|
"grad_norm": 0.46325215697288513, |
|
"learning_rate": 3.890476190476191e-05, |
|
"loss": 0.1004, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 10.571428571428571, |
|
"grad_norm": 0.28526756167411804, |
|
"learning_rate": 3.885714285714286e-05, |
|
"loss": 0.0977, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 10.595238095238095, |
|
"grad_norm": 0.27527350187301636, |
|
"learning_rate": 3.880952380952381e-05, |
|
"loss": 0.0879, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 10.619047619047619, |
|
"grad_norm": 0.36892035603523254, |
|
"learning_rate": 3.8761904761904764e-05, |
|
"loss": 0.1032, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 10.642857142857142, |
|
"grad_norm": 0.3519724905490875, |
|
"learning_rate": 3.8714285714285715e-05, |
|
"loss": 0.1044, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 10.666666666666666, |
|
"grad_norm": 1.0675806999206543, |
|
"learning_rate": 3.866666666666667e-05, |
|
"loss": 0.1241, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 10.69047619047619, |
|
"grad_norm": 0.3300955891609192, |
|
"learning_rate": 3.8619047619047625e-05, |
|
"loss": 0.1002, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 10.714285714285714, |
|
"grad_norm": 0.3200720250606537, |
|
"learning_rate": 3.857142857142858e-05, |
|
"loss": 0.092, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 10.738095238095237, |
|
"grad_norm": 0.32000890374183655, |
|
"learning_rate": 3.852380952380953e-05, |
|
"loss": 0.0894, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 10.761904761904763, |
|
"grad_norm": 0.5081201195716858, |
|
"learning_rate": 3.847619047619048e-05, |
|
"loss": 0.116, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 10.785714285714286, |
|
"grad_norm": 0.4235091507434845, |
|
"learning_rate": 3.8428571428571425e-05, |
|
"loss": 0.1047, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 10.80952380952381, |
|
"grad_norm": 0.24375148117542267, |
|
"learning_rate": 3.8380952380952376e-05, |
|
"loss": 0.0878, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 10.833333333333334, |
|
"grad_norm": 0.32877597212791443, |
|
"learning_rate": 3.8333333333333334e-05, |
|
"loss": 0.0963, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 10.857142857142858, |
|
"grad_norm": 0.31234005093574524, |
|
"learning_rate": 3.8285714285714286e-05, |
|
"loss": 0.1155, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 10.880952380952381, |
|
"grad_norm": 0.2086906135082245, |
|
"learning_rate": 3.823809523809524e-05, |
|
"loss": 0.0933, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 10.904761904761905, |
|
"grad_norm": 0.22270095348358154, |
|
"learning_rate": 3.819047619047619e-05, |
|
"loss": 0.0858, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 10.928571428571429, |
|
"grad_norm": 0.2681645154953003, |
|
"learning_rate": 3.814285714285714e-05, |
|
"loss": 0.1056, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 10.952380952380953, |
|
"grad_norm": 0.5809670686721802, |
|
"learning_rate": 3.809523809523809e-05, |
|
"loss": 0.1205, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 10.952380952380953, |
|
"eval_dice_score": 0.9194055696314088, |
|
"eval_loss": 0.09057500213384628, |
|
"eval_runtime": 8.8575, |
|
"eval_samples_per_second": 10.048, |
|
"eval_steps_per_second": 1.016, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 10.976190476190476, |
|
"grad_norm": 0.31106501817703247, |
|
"learning_rate": 3.8047619047619044e-05, |
|
"loss": 0.1124, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"grad_norm": 0.23329056799411774, |
|
"learning_rate": 3.8e-05, |
|
"loss": 0.1003, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 11.023809523809524, |
|
"grad_norm": 0.3269498944282532, |
|
"learning_rate": 3.7952380952380954e-05, |
|
"loss": 0.1102, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 11.047619047619047, |
|
"grad_norm": 0.41568589210510254, |
|
"learning_rate": 3.7904761904761905e-05, |
|
"loss": 0.0839, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 11.071428571428571, |
|
"grad_norm": 0.34682050347328186, |
|
"learning_rate": 3.785714285714286e-05, |
|
"loss": 0.0886, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 11.095238095238095, |
|
"grad_norm": 0.4217880070209503, |
|
"learning_rate": 3.780952380952381e-05, |
|
"loss": 0.0871, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 11.119047619047619, |
|
"grad_norm": 0.2800375819206238, |
|
"learning_rate": 3.776190476190476e-05, |
|
"loss": 0.1045, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 11.142857142857142, |
|
"grad_norm": 0.25883740186691284, |
|
"learning_rate": 3.771428571428571e-05, |
|
"loss": 0.0879, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 11.166666666666666, |
|
"grad_norm": 0.26584306359291077, |
|
"learning_rate": 3.766666666666667e-05, |
|
"loss": 0.1041, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 11.19047619047619, |
|
"grad_norm": 0.3690215051174164, |
|
"learning_rate": 3.761904761904762e-05, |
|
"loss": 0.1049, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 11.214285714285714, |
|
"grad_norm": 0.23434555530548096, |
|
"learning_rate": 3.757142857142857e-05, |
|
"loss": 0.0929, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 11.238095238095237, |
|
"grad_norm": 0.23841549456119537, |
|
"learning_rate": 3.7523809523809524e-05, |
|
"loss": 0.0853, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 11.261904761904763, |
|
"grad_norm": 0.21769563853740692, |
|
"learning_rate": 3.7476190476190476e-05, |
|
"loss": 0.0873, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 11.285714285714286, |
|
"grad_norm": 0.2575165033340454, |
|
"learning_rate": 3.742857142857143e-05, |
|
"loss": 0.0969, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 11.30952380952381, |
|
"grad_norm": 0.7376023530960083, |
|
"learning_rate": 3.738095238095238e-05, |
|
"loss": 0.107, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 11.333333333333334, |
|
"grad_norm": 0.6353978514671326, |
|
"learning_rate": 3.733333333333334e-05, |
|
"loss": 0.117, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 11.357142857142858, |
|
"grad_norm": 0.3576112985610962, |
|
"learning_rate": 3.728571428571429e-05, |
|
"loss": 0.0776, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 11.380952380952381, |
|
"grad_norm": 0.3014508783817291, |
|
"learning_rate": 3.723809523809524e-05, |
|
"loss": 0.1066, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 11.404761904761905, |
|
"grad_norm": 0.3285028040409088, |
|
"learning_rate": 3.719047619047619e-05, |
|
"loss": 0.1115, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 11.428571428571429, |
|
"grad_norm": 0.4150606393814087, |
|
"learning_rate": 3.7142857142857143e-05, |
|
"loss": 0.1011, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 11.428571428571429, |
|
"eval_dice_score": 0.9186469055451411, |
|
"eval_loss": 0.09131216257810593, |
|
"eval_runtime": 8.8331, |
|
"eval_samples_per_second": 10.076, |
|
"eval_steps_per_second": 1.019, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 11.452380952380953, |
|
"grad_norm": 0.3601835072040558, |
|
"learning_rate": 3.7095238095238095e-05, |
|
"loss": 0.1069, |
|
"step": 481 |
|
}, |
|
{ |
|
"epoch": 11.476190476190476, |
|
"grad_norm": 0.30209657549858093, |
|
"learning_rate": 3.704761904761905e-05, |
|
"loss": 0.1357, |
|
"step": 482 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"grad_norm": 0.3924671709537506, |
|
"learning_rate": 3.7000000000000005e-05, |
|
"loss": 0.0962, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 11.523809523809524, |
|
"grad_norm": 0.28674888610839844, |
|
"learning_rate": 3.6952380952380956e-05, |
|
"loss": 0.0863, |
|
"step": 484 |
|
}, |
|
{ |
|
"epoch": 11.547619047619047, |
|
"grad_norm": 0.25611767172813416, |
|
"learning_rate": 3.690476190476191e-05, |
|
"loss": 0.1034, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 11.571428571428571, |
|
"grad_norm": 0.41438841819763184, |
|
"learning_rate": 3.685714285714286e-05, |
|
"loss": 0.0873, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 11.595238095238095, |
|
"grad_norm": 0.5831720232963562, |
|
"learning_rate": 3.680952380952381e-05, |
|
"loss": 0.0965, |
|
"step": 487 |
|
}, |
|
{ |
|
"epoch": 11.619047619047619, |
|
"grad_norm": 0.2311333268880844, |
|
"learning_rate": 3.676190476190476e-05, |
|
"loss": 0.0888, |
|
"step": 488 |
|
}, |
|
{ |
|
"epoch": 11.642857142857142, |
|
"grad_norm": 0.2140185832977295, |
|
"learning_rate": 3.671428571428572e-05, |
|
"loss": 0.1114, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 11.666666666666666, |
|
"grad_norm": 0.2992950975894928, |
|
"learning_rate": 3.666666666666667e-05, |
|
"loss": 0.0936, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 11.69047619047619, |
|
"grad_norm": 0.4659542143344879, |
|
"learning_rate": 3.6619047619047624e-05, |
|
"loss": 0.1018, |
|
"step": 491 |
|
}, |
|
{ |
|
"epoch": 11.714285714285714, |
|
"grad_norm": 0.32879874110221863, |
|
"learning_rate": 3.6571428571428576e-05, |
|
"loss": 0.0946, |
|
"step": 492 |
|
}, |
|
{ |
|
"epoch": 11.738095238095237, |
|
"grad_norm": 0.3425431251525879, |
|
"learning_rate": 3.652380952380952e-05, |
|
"loss": 0.0885, |
|
"step": 493 |
|
}, |
|
{ |
|
"epoch": 11.761904761904763, |
|
"grad_norm": 0.24904826283454895, |
|
"learning_rate": 3.647619047619047e-05, |
|
"loss": 0.0913, |
|
"step": 494 |
|
}, |
|
{ |
|
"epoch": 11.785714285714286, |
|
"grad_norm": 0.26887717843055725, |
|
"learning_rate": 3.6428571428571423e-05, |
|
"loss": 0.0827, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 11.80952380952381, |
|
"grad_norm": 0.2696100175380707, |
|
"learning_rate": 3.638095238095238e-05, |
|
"loss": 0.0879, |
|
"step": 496 |
|
}, |
|
{ |
|
"epoch": 11.833333333333334, |
|
"grad_norm": 0.28169044852256775, |
|
"learning_rate": 3.633333333333333e-05, |
|
"loss": 0.1033, |
|
"step": 497 |
|
}, |
|
{ |
|
"epoch": 11.857142857142858, |
|
"grad_norm": 0.35210949182510376, |
|
"learning_rate": 3.6285714285714285e-05, |
|
"loss": 0.0972, |
|
"step": 498 |
|
}, |
|
{ |
|
"epoch": 11.880952380952381, |
|
"grad_norm": 0.2619428336620331, |
|
"learning_rate": 3.6238095238095236e-05, |
|
"loss": 0.1012, |
|
"step": 499 |
|
}, |
|
{ |
|
"epoch": 11.904761904761905, |
|
"grad_norm": 0.2678943872451782, |
|
"learning_rate": 3.619047619047619e-05, |
|
"loss": 0.0886, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 11.904761904761905, |
|
"eval_dice_score": 0.9205903160321725, |
|
"eval_loss": 0.09119950979948044, |
|
"eval_runtime": 8.8433, |
|
"eval_samples_per_second": 10.064, |
|
"eval_steps_per_second": 1.018, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 11.928571428571429, |
|
"grad_norm": 0.26405709981918335, |
|
"learning_rate": 3.614285714285714e-05, |
|
"loss": 0.1017, |
|
"step": 501 |
|
}, |
|
{ |
|
"epoch": 11.952380952380953, |
|
"grad_norm": 0.8981893062591553, |
|
"learning_rate": 3.60952380952381e-05, |
|
"loss": 0.1172, |
|
"step": 502 |
|
}, |
|
{ |
|
"epoch": 11.976190476190476, |
|
"grad_norm": 0.504848837852478, |
|
"learning_rate": 3.604761904761905e-05, |
|
"loss": 0.0996, |
|
"step": 503 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"grad_norm": 0.2666759490966797, |
|
"learning_rate": 3.6e-05, |
|
"loss": 0.0847, |
|
"step": 504 |
|
}, |
|
{ |
|
"epoch": 12.023809523809524, |
|
"grad_norm": 0.2892642021179199, |
|
"learning_rate": 3.595238095238095e-05, |
|
"loss": 0.085, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 12.047619047619047, |
|
"grad_norm": 0.5016778707504272, |
|
"learning_rate": 3.5904761904761904e-05, |
|
"loss": 0.0999, |
|
"step": 506 |
|
}, |
|
{ |
|
"epoch": 12.071428571428571, |
|
"grad_norm": 0.302896648645401, |
|
"learning_rate": 3.5857142857142856e-05, |
|
"loss": 0.1005, |
|
"step": 507 |
|
}, |
|
{ |
|
"epoch": 12.095238095238095, |
|
"grad_norm": 1.1246132850646973, |
|
"learning_rate": 3.580952380952381e-05, |
|
"loss": 0.1163, |
|
"step": 508 |
|
}, |
|
{ |
|
"epoch": 12.119047619047619, |
|
"grad_norm": 0.33260878920555115, |
|
"learning_rate": 3.5761904761904765e-05, |
|
"loss": 0.1062, |
|
"step": 509 |
|
}, |
|
{ |
|
"epoch": 12.142857142857142, |
|
"grad_norm": 0.2548132538795471, |
|
"learning_rate": 3.571428571428572e-05, |
|
"loss": 0.1041, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 12.166666666666666, |
|
"grad_norm": 0.3263733983039856, |
|
"learning_rate": 3.566666666666667e-05, |
|
"loss": 0.076, |
|
"step": 511 |
|
}, |
|
{ |
|
"epoch": 12.19047619047619, |
|
"grad_norm": 0.4536820948123932, |
|
"learning_rate": 3.561904761904762e-05, |
|
"loss": 0.1004, |
|
"step": 512 |
|
}, |
|
{ |
|
"epoch": 12.214285714285714, |
|
"grad_norm": 0.3253687024116516, |
|
"learning_rate": 3.557142857142857e-05, |
|
"loss": 0.0959, |
|
"step": 513 |
|
}, |
|
{ |
|
"epoch": 12.238095238095237, |
|
"grad_norm": 0.4816119968891144, |
|
"learning_rate": 3.552380952380952e-05, |
|
"loss": 0.1107, |
|
"step": 514 |
|
}, |
|
{ |
|
"epoch": 12.261904761904763, |
|
"grad_norm": 0.2999951243400574, |
|
"learning_rate": 3.5476190476190475e-05, |
|
"loss": 0.1103, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 12.285714285714286, |
|
"grad_norm": 0.2874181866645813, |
|
"learning_rate": 3.542857142857143e-05, |
|
"loss": 0.0994, |
|
"step": 516 |
|
}, |
|
{ |
|
"epoch": 12.30952380952381, |
|
"grad_norm": 0.22572314739227295, |
|
"learning_rate": 3.5380952380952385e-05, |
|
"loss": 0.1032, |
|
"step": 517 |
|
}, |
|
{ |
|
"epoch": 12.333333333333334, |
|
"grad_norm": 0.24886523187160492, |
|
"learning_rate": 3.5333333333333336e-05, |
|
"loss": 0.0892, |
|
"step": 518 |
|
}, |
|
{ |
|
"epoch": 12.357142857142858, |
|
"grad_norm": 0.34990599751472473, |
|
"learning_rate": 3.528571428571429e-05, |
|
"loss": 0.0866, |
|
"step": 519 |
|
}, |
|
{ |
|
"epoch": 12.380952380952381, |
|
"grad_norm": 0.4522719383239746, |
|
"learning_rate": 3.523809523809524e-05, |
|
"loss": 0.1032, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 12.380952380952381, |
|
"eval_dice_score": 0.9187127339740486, |
|
"eval_loss": 0.0883309617638588, |
|
"eval_runtime": 8.8468, |
|
"eval_samples_per_second": 10.06, |
|
"eval_steps_per_second": 1.017, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 12.404761904761905, |
|
"grad_norm": 0.4739639461040497, |
|
"learning_rate": 3.519047619047619e-05, |
|
"loss": 0.0934, |
|
"step": 521 |
|
}, |
|
{ |
|
"epoch": 12.428571428571429, |
|
"grad_norm": 0.3023685812950134, |
|
"learning_rate": 3.514285714285714e-05, |
|
"loss": 0.0798, |
|
"step": 522 |
|
}, |
|
{ |
|
"epoch": 12.452380952380953, |
|
"grad_norm": 0.49961069226264954, |
|
"learning_rate": 3.50952380952381e-05, |
|
"loss": 0.1129, |
|
"step": 523 |
|
}, |
|
{ |
|
"epoch": 12.476190476190476, |
|
"grad_norm": 0.2742755711078644, |
|
"learning_rate": 3.504761904761905e-05, |
|
"loss": 0.0854, |
|
"step": 524 |
|
}, |
|
{ |
|
"epoch": 12.5, |
|
"grad_norm": 0.3225800096988678, |
|
"learning_rate": 3.5000000000000004e-05, |
|
"loss": 0.1067, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 12.523809523809524, |
|
"grad_norm": 0.18689511716365814, |
|
"learning_rate": 3.4952380952380955e-05, |
|
"loss": 0.0837, |
|
"step": 526 |
|
}, |
|
{ |
|
"epoch": 12.547619047619047, |
|
"grad_norm": 0.4479936957359314, |
|
"learning_rate": 3.490476190476191e-05, |
|
"loss": 0.1043, |
|
"step": 527 |
|
}, |
|
{ |
|
"epoch": 12.571428571428571, |
|
"grad_norm": 0.229170560836792, |
|
"learning_rate": 3.485714285714286e-05, |
|
"loss": 0.0899, |
|
"step": 528 |
|
}, |
|
{ |
|
"epoch": 12.595238095238095, |
|
"grad_norm": 0.28260865807533264, |
|
"learning_rate": 3.480952380952382e-05, |
|
"loss": 0.0812, |
|
"step": 529 |
|
}, |
|
{ |
|
"epoch": 12.619047619047619, |
|
"grad_norm": 0.22655904293060303, |
|
"learning_rate": 3.476190476190477e-05, |
|
"loss": 0.0781, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 12.642857142857142, |
|
"grad_norm": 0.23291556537151337, |
|
"learning_rate": 3.471428571428572e-05, |
|
"loss": 0.0821, |
|
"step": 531 |
|
}, |
|
{ |
|
"epoch": 12.666666666666666, |
|
"grad_norm": 0.3438037931919098, |
|
"learning_rate": 3.4666666666666665e-05, |
|
"loss": 0.1208, |
|
"step": 532 |
|
}, |
|
{ |
|
"epoch": 12.69047619047619, |
|
"grad_norm": 0.4007132947444916, |
|
"learning_rate": 3.4619047619047616e-05, |
|
"loss": 0.1209, |
|
"step": 533 |
|
}, |
|
{ |
|
"epoch": 12.714285714285714, |
|
"grad_norm": 0.22857758402824402, |
|
"learning_rate": 3.457142857142857e-05, |
|
"loss": 0.0906, |
|
"step": 534 |
|
}, |
|
{ |
|
"epoch": 12.738095238095237, |
|
"grad_norm": 0.33069509267807007, |
|
"learning_rate": 3.452380952380952e-05, |
|
"loss": 0.0862, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 12.761904761904763, |
|
"grad_norm": 0.2187006175518036, |
|
"learning_rate": 3.447619047619048e-05, |
|
"loss": 0.0881, |
|
"step": 536 |
|
}, |
|
{ |
|
"epoch": 12.785714285714286, |
|
"grad_norm": 0.3787616193294525, |
|
"learning_rate": 3.442857142857143e-05, |
|
"loss": 0.1055, |
|
"step": 537 |
|
}, |
|
{ |
|
"epoch": 12.80952380952381, |
|
"grad_norm": 0.3690526485443115, |
|
"learning_rate": 3.438095238095238e-05, |
|
"loss": 0.0939, |
|
"step": 538 |
|
}, |
|
{ |
|
"epoch": 12.833333333333334, |
|
"grad_norm": 0.2734881341457367, |
|
"learning_rate": 3.433333333333333e-05, |
|
"loss": 0.0799, |
|
"step": 539 |
|
}, |
|
{ |
|
"epoch": 12.857142857142858, |
|
"grad_norm": 0.32879072427749634, |
|
"learning_rate": 3.4285714285714284e-05, |
|
"loss": 0.094, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 12.857142857142858, |
|
"eval_dice_score": 0.9214406627422178, |
|
"eval_loss": 0.08812714368104935, |
|
"eval_runtime": 8.9703, |
|
"eval_samples_per_second": 9.922, |
|
"eval_steps_per_second": 1.003, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 12.880952380952381, |
|
"grad_norm": 0.2516557276248932, |
|
"learning_rate": 3.4238095238095235e-05, |
|
"loss": 0.1029, |
|
"step": 541 |
|
}, |
|
{ |
|
"epoch": 12.904761904761905, |
|
"grad_norm": 0.28719547390937805, |
|
"learning_rate": 3.419047619047619e-05, |
|
"loss": 0.0961, |
|
"step": 542 |
|
}, |
|
{ |
|
"epoch": 12.928571428571429, |
|
"grad_norm": 0.2147558480501175, |
|
"learning_rate": 3.4142857142857145e-05, |
|
"loss": 0.0948, |
|
"step": 543 |
|
}, |
|
{ |
|
"epoch": 12.952380952380953, |
|
"grad_norm": 0.36119335889816284, |
|
"learning_rate": 3.40952380952381e-05, |
|
"loss": 0.0835, |
|
"step": 544 |
|
}, |
|
{ |
|
"epoch": 12.976190476190476, |
|
"grad_norm": 0.4117376208305359, |
|
"learning_rate": 3.404761904761905e-05, |
|
"loss": 0.0906, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"grad_norm": 0.27086928486824036, |
|
"learning_rate": 3.4e-05, |
|
"loss": 0.0933, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 13.023809523809524, |
|
"grad_norm": 0.17893396317958832, |
|
"learning_rate": 3.395238095238095e-05, |
|
"loss": 0.088, |
|
"step": 547 |
|
}, |
|
{ |
|
"epoch": 13.047619047619047, |
|
"grad_norm": 0.22478432953357697, |
|
"learning_rate": 3.39047619047619e-05, |
|
"loss": 0.0761, |
|
"step": 548 |
|
}, |
|
{ |
|
"epoch": 13.071428571428571, |
|
"grad_norm": 0.6636024713516235, |
|
"learning_rate": 3.3857142857142854e-05, |
|
"loss": 0.0998, |
|
"step": 549 |
|
}, |
|
{ |
|
"epoch": 13.095238095238095, |
|
"grad_norm": 0.2301420271396637, |
|
"learning_rate": 3.380952380952381e-05, |
|
"loss": 0.0837, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 13.119047619047619, |
|
"grad_norm": 0.32941392064094543, |
|
"learning_rate": 3.3761904761904764e-05, |
|
"loss": 0.1032, |
|
"step": 551 |
|
}, |
|
{ |
|
"epoch": 13.142857142857142, |
|
"grad_norm": 0.47822511196136475, |
|
"learning_rate": 3.3714285714285716e-05, |
|
"loss": 0.1213, |
|
"step": 552 |
|
}, |
|
{ |
|
"epoch": 13.166666666666666, |
|
"grad_norm": 0.38559892773628235, |
|
"learning_rate": 3.366666666666667e-05, |
|
"loss": 0.0967, |
|
"step": 553 |
|
}, |
|
{ |
|
"epoch": 13.19047619047619, |
|
"grad_norm": 0.3795962333679199, |
|
"learning_rate": 3.361904761904762e-05, |
|
"loss": 0.0848, |
|
"step": 554 |
|
}, |
|
{ |
|
"epoch": 13.214285714285714, |
|
"grad_norm": 0.33221033215522766, |
|
"learning_rate": 3.357142857142857e-05, |
|
"loss": 0.0829, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 13.238095238095237, |
|
"grad_norm": 0.2630172371864319, |
|
"learning_rate": 3.352380952380953e-05, |
|
"loss": 0.1104, |
|
"step": 556 |
|
}, |
|
{ |
|
"epoch": 13.261904761904763, |
|
"grad_norm": 0.2620716989040375, |
|
"learning_rate": 3.347619047619048e-05, |
|
"loss": 0.0883, |
|
"step": 557 |
|
}, |
|
{ |
|
"epoch": 13.285714285714286, |
|
"grad_norm": 0.4213337004184723, |
|
"learning_rate": 3.342857142857143e-05, |
|
"loss": 0.0923, |
|
"step": 558 |
|
}, |
|
{ |
|
"epoch": 13.30952380952381, |
|
"grad_norm": 0.17743177711963654, |
|
"learning_rate": 3.338095238095238e-05, |
|
"loss": 0.0811, |
|
"step": 559 |
|
}, |
|
{ |
|
"epoch": 13.333333333333334, |
|
"grad_norm": 0.27336469292640686, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"loss": 0.0801, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 13.333333333333334, |
|
"eval_dice_score": 0.921020052010366, |
|
"eval_loss": 0.08740828186273575, |
|
"eval_runtime": 8.8356, |
|
"eval_samples_per_second": 10.073, |
|
"eval_steps_per_second": 1.019, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 13.357142857142858, |
|
"grad_norm": 0.1683669090270996, |
|
"learning_rate": 3.3285714285714286e-05, |
|
"loss": 0.0831, |
|
"step": 561 |
|
}, |
|
{ |
|
"epoch": 13.380952380952381, |
|
"grad_norm": 0.2313413918018341, |
|
"learning_rate": 3.323809523809524e-05, |
|
"loss": 0.0945, |
|
"step": 562 |
|
}, |
|
{ |
|
"epoch": 13.404761904761905, |
|
"grad_norm": 0.2842002511024475, |
|
"learning_rate": 3.3190476190476196e-05, |
|
"loss": 0.1027, |
|
"step": 563 |
|
}, |
|
{ |
|
"epoch": 13.428571428571429, |
|
"grad_norm": 0.39948076009750366, |
|
"learning_rate": 3.314285714285715e-05, |
|
"loss": 0.1064, |
|
"step": 564 |
|
}, |
|
{ |
|
"epoch": 13.452380952380953, |
|
"grad_norm": 0.2080223709344864, |
|
"learning_rate": 3.30952380952381e-05, |
|
"loss": 0.0781, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 13.476190476190476, |
|
"grad_norm": 0.39122873544692993, |
|
"learning_rate": 3.304761904761905e-05, |
|
"loss": 0.1042, |
|
"step": 566 |
|
}, |
|
{ |
|
"epoch": 13.5, |
|
"grad_norm": 0.1916409730911255, |
|
"learning_rate": 3.3e-05, |
|
"loss": 0.087, |
|
"step": 567 |
|
}, |
|
{ |
|
"epoch": 13.523809523809524, |
|
"grad_norm": 0.21745239198207855, |
|
"learning_rate": 3.2952380952380954e-05, |
|
"loss": 0.0863, |
|
"step": 568 |
|
}, |
|
{ |
|
"epoch": 13.547619047619047, |
|
"grad_norm": 0.2460186928510666, |
|
"learning_rate": 3.2904761904761906e-05, |
|
"loss": 0.1136, |
|
"step": 569 |
|
}, |
|
{ |
|
"epoch": 13.571428571428571, |
|
"grad_norm": 0.2653687596321106, |
|
"learning_rate": 3.2857142857142864e-05, |
|
"loss": 0.0737, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 13.595238095238095, |
|
"grad_norm": 0.261576384305954, |
|
"learning_rate": 3.280952380952381e-05, |
|
"loss": 0.0931, |
|
"step": 571 |
|
}, |
|
{ |
|
"epoch": 13.619047619047619, |
|
"grad_norm": 0.2636834383010864, |
|
"learning_rate": 3.276190476190476e-05, |
|
"loss": 0.0826, |
|
"step": 572 |
|
}, |
|
{ |
|
"epoch": 13.642857142857142, |
|
"grad_norm": 0.23599408566951752, |
|
"learning_rate": 3.271428571428571e-05, |
|
"loss": 0.0951, |
|
"step": 573 |
|
}, |
|
{ |
|
"epoch": 13.666666666666666, |
|
"grad_norm": 0.2800326347351074, |
|
"learning_rate": 3.266666666666666e-05, |
|
"loss": 0.103, |
|
"step": 574 |
|
}, |
|
{ |
|
"epoch": 13.69047619047619, |
|
"grad_norm": 0.5657874345779419, |
|
"learning_rate": 3.2619047619047615e-05, |
|
"loss": 0.1053, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 13.714285714285714, |
|
"grad_norm": 0.23854580521583557, |
|
"learning_rate": 3.2571428571428566e-05, |
|
"loss": 0.0909, |
|
"step": 576 |
|
}, |
|
{ |
|
"epoch": 13.738095238095237, |
|
"grad_norm": 0.21590976417064667, |
|
"learning_rate": 3.2523809523809525e-05, |
|
"loss": 0.1133, |
|
"step": 577 |
|
}, |
|
{ |
|
"epoch": 13.761904761904763, |
|
"grad_norm": 0.2769882082939148, |
|
"learning_rate": 3.2476190476190476e-05, |
|
"loss": 0.0795, |
|
"step": 578 |
|
}, |
|
{ |
|
"epoch": 13.785714285714286, |
|
"grad_norm": 0.47914767265319824, |
|
"learning_rate": 3.242857142857143e-05, |
|
"loss": 0.1, |
|
"step": 579 |
|
}, |
|
{ |
|
"epoch": 13.80952380952381, |
|
"grad_norm": 0.3476030230522156, |
|
"learning_rate": 3.238095238095238e-05, |
|
"loss": 0.0953, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 13.80952380952381, |
|
"eval_dice_score": 0.919850237804958, |
|
"eval_loss": 0.08700626343488693, |
|
"eval_runtime": 8.7245, |
|
"eval_samples_per_second": 10.201, |
|
"eval_steps_per_second": 1.032, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 13.833333333333334, |
|
"grad_norm": 0.7643526792526245, |
|
"learning_rate": 3.233333333333333e-05, |
|
"loss": 0.0931, |
|
"step": 581 |
|
}, |
|
{ |
|
"epoch": 13.857142857142858, |
|
"grad_norm": 0.25581708550453186, |
|
"learning_rate": 3.228571428571428e-05, |
|
"loss": 0.0783, |
|
"step": 582 |
|
}, |
|
{ |
|
"epoch": 13.880952380952381, |
|
"grad_norm": 0.17205297946929932, |
|
"learning_rate": 3.223809523809524e-05, |
|
"loss": 0.0924, |
|
"step": 583 |
|
}, |
|
{ |
|
"epoch": 13.904761904761905, |
|
"grad_norm": 0.21472246944904327, |
|
"learning_rate": 3.219047619047619e-05, |
|
"loss": 0.0827, |
|
"step": 584 |
|
}, |
|
{ |
|
"epoch": 13.928571428571429, |
|
"grad_norm": 0.16439926624298096, |
|
"learning_rate": 3.2142857142857144e-05, |
|
"loss": 0.0802, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 13.952380952380953, |
|
"grad_norm": 0.7240442037582397, |
|
"learning_rate": 3.2095238095238095e-05, |
|
"loss": 0.1241, |
|
"step": 586 |
|
}, |
|
{ |
|
"epoch": 13.976190476190476, |
|
"grad_norm": 0.20413804054260254, |
|
"learning_rate": 3.204761904761905e-05, |
|
"loss": 0.083, |
|
"step": 587 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"grad_norm": 0.5824400186538696, |
|
"learning_rate": 3.2e-05, |
|
"loss": 0.0981, |
|
"step": 588 |
|
}, |
|
{ |
|
"epoch": 14.023809523809524, |
|
"grad_norm": 0.7712008953094482, |
|
"learning_rate": 3.195238095238095e-05, |
|
"loss": 0.1011, |
|
"step": 589 |
|
}, |
|
{ |
|
"epoch": 14.047619047619047, |
|
"grad_norm": 0.24950990080833435, |
|
"learning_rate": 3.190476190476191e-05, |
|
"loss": 0.0825, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 14.071428571428571, |
|
"grad_norm": 0.9160091876983643, |
|
"learning_rate": 3.185714285714286e-05, |
|
"loss": 0.1299, |
|
"step": 591 |
|
}, |
|
{ |
|
"epoch": 14.095238095238095, |
|
"grad_norm": 0.4385213255882263, |
|
"learning_rate": 3.180952380952381e-05, |
|
"loss": 0.1004, |
|
"step": 592 |
|
}, |
|
{ |
|
"epoch": 14.119047619047619, |
|
"grad_norm": 0.27217090129852295, |
|
"learning_rate": 3.176190476190476e-05, |
|
"loss": 0.0853, |
|
"step": 593 |
|
}, |
|
{ |
|
"epoch": 14.142857142857142, |
|
"grad_norm": 0.3268568813800812, |
|
"learning_rate": 3.1714285714285715e-05, |
|
"loss": 0.0902, |
|
"step": 594 |
|
}, |
|
{ |
|
"epoch": 14.166666666666666, |
|
"grad_norm": 0.2754589021205902, |
|
"learning_rate": 3.1666666666666666e-05, |
|
"loss": 0.0906, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 14.19047619047619, |
|
"grad_norm": 0.16892971098423004, |
|
"learning_rate": 3.161904761904762e-05, |
|
"loss": 0.09, |
|
"step": 596 |
|
}, |
|
{ |
|
"epoch": 14.214285714285714, |
|
"grad_norm": 0.16637921333312988, |
|
"learning_rate": 3.1571428571428576e-05, |
|
"loss": 0.0899, |
|
"step": 597 |
|
}, |
|
{ |
|
"epoch": 14.238095238095237, |
|
"grad_norm": 0.22213107347488403, |
|
"learning_rate": 3.152380952380953e-05, |
|
"loss": 0.0772, |
|
"step": 598 |
|
}, |
|
{ |
|
"epoch": 14.261904761904763, |
|
"grad_norm": 0.556175172328949, |
|
"learning_rate": 3.147619047619048e-05, |
|
"loss": 0.0973, |
|
"step": 599 |
|
}, |
|
{ |
|
"epoch": 14.285714285714286, |
|
"grad_norm": 0.2054072469472885, |
|
"learning_rate": 3.142857142857143e-05, |
|
"loss": 0.0841, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 14.285714285714286, |
|
"eval_dice_score": 0.9219252570440926, |
|
"eval_loss": 0.08547207713127136, |
|
"eval_runtime": 8.7966, |
|
"eval_samples_per_second": 10.117, |
|
"eval_steps_per_second": 1.023, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 14.30952380952381, |
|
"grad_norm": 0.2162121832370758, |
|
"learning_rate": 3.138095238095238e-05, |
|
"loss": 0.0864, |
|
"step": 601 |
|
}, |
|
{ |
|
"epoch": 14.333333333333334, |
|
"grad_norm": 0.19158528745174408, |
|
"learning_rate": 3.1333333333333334e-05, |
|
"loss": 0.0969, |
|
"step": 602 |
|
}, |
|
{ |
|
"epoch": 14.357142857142858, |
|
"grad_norm": 0.20269101858139038, |
|
"learning_rate": 3.1285714285714285e-05, |
|
"loss": 0.0871, |
|
"step": 603 |
|
}, |
|
{ |
|
"epoch": 14.380952380952381, |
|
"grad_norm": 0.4201182425022125, |
|
"learning_rate": 3.1238095238095244e-05, |
|
"loss": 0.0966, |
|
"step": 604 |
|
}, |
|
{ |
|
"epoch": 14.404761904761905, |
|
"grad_norm": 0.2613025903701782, |
|
"learning_rate": 3.1190476190476195e-05, |
|
"loss": 0.0884, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 14.428571428571429, |
|
"grad_norm": 0.19924290478229523, |
|
"learning_rate": 3.114285714285715e-05, |
|
"loss": 0.0896, |
|
"step": 606 |
|
}, |
|
{ |
|
"epoch": 14.452380952380953, |
|
"grad_norm": 0.17426665127277374, |
|
"learning_rate": 3.10952380952381e-05, |
|
"loss": 0.098, |
|
"step": 607 |
|
}, |
|
{ |
|
"epoch": 14.476190476190476, |
|
"grad_norm": 0.25578776001930237, |
|
"learning_rate": 3.104761904761905e-05, |
|
"loss": 0.082, |
|
"step": 608 |
|
}, |
|
{ |
|
"epoch": 14.5, |
|
"grad_norm": 0.7310316562652588, |
|
"learning_rate": 3.1e-05, |
|
"loss": 0.1199, |
|
"step": 609 |
|
}, |
|
{ |
|
"epoch": 14.523809523809524, |
|
"grad_norm": 0.223856583237648, |
|
"learning_rate": 3.095238095238096e-05, |
|
"loss": 0.0942, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 14.547619047619047, |
|
"grad_norm": 0.22037817537784576, |
|
"learning_rate": 3.0904761904761904e-05, |
|
"loss": 0.1062, |
|
"step": 611 |
|
}, |
|
{ |
|
"epoch": 14.571428571428571, |
|
"grad_norm": 0.2451525330543518, |
|
"learning_rate": 3.0857142857142856e-05, |
|
"loss": 0.108, |
|
"step": 612 |
|
}, |
|
{ |
|
"epoch": 14.595238095238095, |
|
"grad_norm": 0.2040538340806961, |
|
"learning_rate": 3.080952380952381e-05, |
|
"loss": 0.0892, |
|
"step": 613 |
|
}, |
|
{ |
|
"epoch": 14.619047619047619, |
|
"grad_norm": 0.2738243043422699, |
|
"learning_rate": 3.076190476190476e-05, |
|
"loss": 0.098, |
|
"step": 614 |
|
}, |
|
{ |
|
"epoch": 14.642857142857142, |
|
"grad_norm": 0.47061553597450256, |
|
"learning_rate": 3.071428571428571e-05, |
|
"loss": 0.0919, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 14.666666666666666, |
|
"grad_norm": 0.590526819229126, |
|
"learning_rate": 3.066666666666666e-05, |
|
"loss": 0.0851, |
|
"step": 616 |
|
}, |
|
{ |
|
"epoch": 14.69047619047619, |
|
"grad_norm": 0.2558976113796234, |
|
"learning_rate": 3.061904761904762e-05, |
|
"loss": 0.0781, |
|
"step": 617 |
|
}, |
|
{ |
|
"epoch": 14.714285714285714, |
|
"grad_norm": 0.406343936920166, |
|
"learning_rate": 3.057142857142857e-05, |
|
"loss": 0.0798, |
|
"step": 618 |
|
}, |
|
{ |
|
"epoch": 14.738095238095237, |
|
"grad_norm": 0.5376405119895935, |
|
"learning_rate": 3.0523809523809524e-05, |
|
"loss": 0.0992, |
|
"step": 619 |
|
}, |
|
{ |
|
"epoch": 14.761904761904763, |
|
"grad_norm": 0.359608918428421, |
|
"learning_rate": 3.0476190476190475e-05, |
|
"loss": 0.0821, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 14.761904761904763, |
|
"eval_dice_score": 0.921945237846471, |
|
"eval_loss": 0.08672548085451126, |
|
"eval_runtime": 8.8578, |
|
"eval_samples_per_second": 10.048, |
|
"eval_steps_per_second": 1.016, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 14.785714285714286, |
|
"grad_norm": 0.4258129894733429, |
|
"learning_rate": 3.0428571428571427e-05, |
|
"loss": 0.086, |
|
"step": 621 |
|
}, |
|
{ |
|
"epoch": 14.80952380952381, |
|
"grad_norm": 0.2852553129196167, |
|
"learning_rate": 3.038095238095238e-05, |
|
"loss": 0.1004, |
|
"step": 622 |
|
}, |
|
{ |
|
"epoch": 14.833333333333334, |
|
"grad_norm": 0.43911564350128174, |
|
"learning_rate": 3.0333333333333333e-05, |
|
"loss": 0.0763, |
|
"step": 623 |
|
}, |
|
{ |
|
"epoch": 14.857142857142858, |
|
"grad_norm": 0.26557502150535583, |
|
"learning_rate": 3.0285714285714285e-05, |
|
"loss": 0.0822, |
|
"step": 624 |
|
}, |
|
{ |
|
"epoch": 14.880952380952381, |
|
"grad_norm": 0.27375146746635437, |
|
"learning_rate": 3.023809523809524e-05, |
|
"loss": 0.0931, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 14.904761904761905, |
|
"grad_norm": 0.279346764087677, |
|
"learning_rate": 3.019047619047619e-05, |
|
"loss": 0.072, |
|
"step": 626 |
|
}, |
|
{ |
|
"epoch": 14.928571428571429, |
|
"grad_norm": 0.23838214576244354, |
|
"learning_rate": 3.0142857142857143e-05, |
|
"loss": 0.0806, |
|
"step": 627 |
|
}, |
|
{ |
|
"epoch": 14.952380952380953, |
|
"grad_norm": 0.16995024681091309, |
|
"learning_rate": 3.0095238095238094e-05, |
|
"loss": 0.0919, |
|
"step": 628 |
|
}, |
|
{ |
|
"epoch": 14.976190476190476, |
|
"grad_norm": 0.2560848593711853, |
|
"learning_rate": 3.004761904761905e-05, |
|
"loss": 0.0957, |
|
"step": 629 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"grad_norm": 0.3053649067878723, |
|
"learning_rate": 3e-05, |
|
"loss": 0.1034, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 15.023809523809524, |
|
"grad_norm": 0.1794043630361557, |
|
"learning_rate": 2.9952380952380952e-05, |
|
"loss": 0.0822, |
|
"step": 631 |
|
}, |
|
{ |
|
"epoch": 15.047619047619047, |
|
"grad_norm": 0.21695193648338318, |
|
"learning_rate": 2.9904761904761907e-05, |
|
"loss": 0.106, |
|
"step": 632 |
|
}, |
|
{ |
|
"epoch": 15.071428571428571, |
|
"grad_norm": 0.38391971588134766, |
|
"learning_rate": 2.985714285714286e-05, |
|
"loss": 0.0961, |
|
"step": 633 |
|
}, |
|
{ |
|
"epoch": 15.095238095238095, |
|
"grad_norm": 0.5132598280906677, |
|
"learning_rate": 2.980952380952381e-05, |
|
"loss": 0.1142, |
|
"step": 634 |
|
}, |
|
{ |
|
"epoch": 15.119047619047619, |
|
"grad_norm": 0.2727733850479126, |
|
"learning_rate": 2.9761904761904762e-05, |
|
"loss": 0.092, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 15.142857142857142, |
|
"grad_norm": 0.2456737458705902, |
|
"learning_rate": 2.9714285714285717e-05, |
|
"loss": 0.0793, |
|
"step": 636 |
|
}, |
|
{ |
|
"epoch": 15.166666666666666, |
|
"grad_norm": 0.2777925133705139, |
|
"learning_rate": 2.966666666666667e-05, |
|
"loss": 0.0848, |
|
"step": 637 |
|
}, |
|
{ |
|
"epoch": 15.19047619047619, |
|
"grad_norm": 0.5437929034233093, |
|
"learning_rate": 2.961904761904762e-05, |
|
"loss": 0.1062, |
|
"step": 638 |
|
}, |
|
{ |
|
"epoch": 15.214285714285714, |
|
"grad_norm": 0.2751214802265167, |
|
"learning_rate": 2.9571428571428575e-05, |
|
"loss": 0.0968, |
|
"step": 639 |
|
}, |
|
{ |
|
"epoch": 15.238095238095237, |
|
"grad_norm": 0.21941329538822174, |
|
"learning_rate": 2.9523809523809523e-05, |
|
"loss": 0.0828, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 15.238095238095237, |
|
"eval_dice_score": 0.92234169468477, |
|
"eval_loss": 0.08497842401266098, |
|
"eval_runtime": 8.8201, |
|
"eval_samples_per_second": 10.091, |
|
"eval_steps_per_second": 1.02, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 15.261904761904763, |
|
"grad_norm": 0.24691537022590637, |
|
"learning_rate": 2.9476190476190475e-05, |
|
"loss": 0.0884, |
|
"step": 641 |
|
}, |
|
{ |
|
"epoch": 15.285714285714286, |
|
"grad_norm": 0.2982950806617737, |
|
"learning_rate": 2.942857142857143e-05, |
|
"loss": 0.0957, |
|
"step": 642 |
|
}, |
|
{ |
|
"epoch": 15.30952380952381, |
|
"grad_norm": 0.2718714475631714, |
|
"learning_rate": 2.938095238095238e-05, |
|
"loss": 0.0883, |
|
"step": 643 |
|
}, |
|
{ |
|
"epoch": 15.333333333333334, |
|
"grad_norm": 0.29031896591186523, |
|
"learning_rate": 2.9333333333333333e-05, |
|
"loss": 0.0712, |
|
"step": 644 |
|
}, |
|
{ |
|
"epoch": 15.357142857142858, |
|
"grad_norm": 0.2872285842895508, |
|
"learning_rate": 2.9285714285714284e-05, |
|
"loss": 0.1028, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 15.380952380952381, |
|
"grad_norm": 0.3029137849807739, |
|
"learning_rate": 2.923809523809524e-05, |
|
"loss": 0.0778, |
|
"step": 646 |
|
}, |
|
{ |
|
"epoch": 15.404761904761905, |
|
"grad_norm": 0.3122120499610901, |
|
"learning_rate": 2.919047619047619e-05, |
|
"loss": 0.0728, |
|
"step": 647 |
|
}, |
|
{ |
|
"epoch": 15.428571428571429, |
|
"grad_norm": 0.28851887583732605, |
|
"learning_rate": 2.9142857142857142e-05, |
|
"loss": 0.0641, |
|
"step": 648 |
|
}, |
|
{ |
|
"epoch": 15.452380952380953, |
|
"grad_norm": 0.4654085338115692, |
|
"learning_rate": 2.9095238095238097e-05, |
|
"loss": 0.1011, |
|
"step": 649 |
|
}, |
|
{ |
|
"epoch": 15.476190476190476, |
|
"grad_norm": 0.32005026936531067, |
|
"learning_rate": 2.904761904761905e-05, |
|
"loss": 0.0953, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 15.5, |
|
"grad_norm": 0.2644088864326477, |
|
"learning_rate": 2.9e-05, |
|
"loss": 0.0854, |
|
"step": 651 |
|
}, |
|
{ |
|
"epoch": 15.523809523809524, |
|
"grad_norm": 0.37003859877586365, |
|
"learning_rate": 2.8952380952380955e-05, |
|
"loss": 0.0975, |
|
"step": 652 |
|
}, |
|
{ |
|
"epoch": 15.547619047619047, |
|
"grad_norm": 0.30068716406822205, |
|
"learning_rate": 2.8904761904761907e-05, |
|
"loss": 0.0806, |
|
"step": 653 |
|
}, |
|
{ |
|
"epoch": 15.571428571428571, |
|
"grad_norm": 0.2711564600467682, |
|
"learning_rate": 2.8857142857142858e-05, |
|
"loss": 0.0765, |
|
"step": 654 |
|
}, |
|
{ |
|
"epoch": 15.595238095238095, |
|
"grad_norm": 0.2577073574066162, |
|
"learning_rate": 2.880952380952381e-05, |
|
"loss": 0.0898, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 15.619047619047619, |
|
"grad_norm": 0.24594584107398987, |
|
"learning_rate": 2.8761904761904765e-05, |
|
"loss": 0.0844, |
|
"step": 656 |
|
}, |
|
{ |
|
"epoch": 15.642857142857142, |
|
"grad_norm": 0.22383585572242737, |
|
"learning_rate": 2.8714285714285716e-05, |
|
"loss": 0.094, |
|
"step": 657 |
|
}, |
|
{ |
|
"epoch": 15.666666666666666, |
|
"grad_norm": 0.3904590904712677, |
|
"learning_rate": 2.8666666666666668e-05, |
|
"loss": 0.0976, |
|
"step": 658 |
|
}, |
|
{ |
|
"epoch": 15.69047619047619, |
|
"grad_norm": 0.2078755795955658, |
|
"learning_rate": 2.8619047619047623e-05, |
|
"loss": 0.1011, |
|
"step": 659 |
|
}, |
|
{ |
|
"epoch": 15.714285714285714, |
|
"grad_norm": 0.35800865292549133, |
|
"learning_rate": 2.857142857142857e-05, |
|
"loss": 0.0914, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 15.714285714285714, |
|
"eval_dice_score": 0.9226904064180195, |
|
"eval_loss": 0.08527851849794388, |
|
"eval_runtime": 8.7169, |
|
"eval_samples_per_second": 10.21, |
|
"eval_steps_per_second": 1.032, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 15.738095238095237, |
|
"grad_norm": 0.30371710658073425, |
|
"learning_rate": 2.8523809523809522e-05, |
|
"loss": 0.0848, |
|
"step": 661 |
|
}, |
|
{ |
|
"epoch": 15.761904761904763, |
|
"grad_norm": 0.17579156160354614, |
|
"learning_rate": 2.8476190476190477e-05, |
|
"loss": 0.0881, |
|
"step": 662 |
|
}, |
|
{ |
|
"epoch": 15.785714285714286, |
|
"grad_norm": 0.2887127697467804, |
|
"learning_rate": 2.842857142857143e-05, |
|
"loss": 0.1119, |
|
"step": 663 |
|
}, |
|
{ |
|
"epoch": 15.80952380952381, |
|
"grad_norm": 0.20994120836257935, |
|
"learning_rate": 2.838095238095238e-05, |
|
"loss": 0.1133, |
|
"step": 664 |
|
}, |
|
{ |
|
"epoch": 15.833333333333334, |
|
"grad_norm": 0.23240219056606293, |
|
"learning_rate": 2.8333333333333332e-05, |
|
"loss": 0.0946, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 15.857142857142858, |
|
"grad_norm": 0.2476823329925537, |
|
"learning_rate": 2.8285714285714287e-05, |
|
"loss": 0.0881, |
|
"step": 666 |
|
}, |
|
{ |
|
"epoch": 15.880952380952381, |
|
"grad_norm": 0.5070866942405701, |
|
"learning_rate": 2.823809523809524e-05, |
|
"loss": 0.0869, |
|
"step": 667 |
|
}, |
|
{ |
|
"epoch": 15.904761904761905, |
|
"grad_norm": 0.2617017328739166, |
|
"learning_rate": 2.819047619047619e-05, |
|
"loss": 0.0823, |
|
"step": 668 |
|
}, |
|
{ |
|
"epoch": 15.928571428571429, |
|
"grad_norm": 0.2422521710395813, |
|
"learning_rate": 2.8142857142857145e-05, |
|
"loss": 0.0909, |
|
"step": 669 |
|
}, |
|
{ |
|
"epoch": 15.952380952380953, |
|
"grad_norm": 0.24192140996456146, |
|
"learning_rate": 2.8095238095238096e-05, |
|
"loss": 0.0836, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 15.976190476190476, |
|
"grad_norm": 0.32048043608665466, |
|
"learning_rate": 2.8047619047619048e-05, |
|
"loss": 0.0906, |
|
"step": 671 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"grad_norm": 0.6783073544502258, |
|
"learning_rate": 2.8e-05, |
|
"loss": 0.0956, |
|
"step": 672 |
|
}, |
|
{ |
|
"epoch": 16.023809523809526, |
|
"grad_norm": 0.21156437695026398, |
|
"learning_rate": 2.7952380952380955e-05, |
|
"loss": 0.0833, |
|
"step": 673 |
|
}, |
|
{ |
|
"epoch": 16.047619047619047, |
|
"grad_norm": 0.28836673498153687, |
|
"learning_rate": 2.7904761904761906e-05, |
|
"loss": 0.0775, |
|
"step": 674 |
|
}, |
|
{ |
|
"epoch": 16.071428571428573, |
|
"grad_norm": 0.2659715712070465, |
|
"learning_rate": 2.7857142857142858e-05, |
|
"loss": 0.0855, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 16.095238095238095, |
|
"grad_norm": 0.2857109010219574, |
|
"learning_rate": 2.7809523809523813e-05, |
|
"loss": 0.0847, |
|
"step": 676 |
|
}, |
|
{ |
|
"epoch": 16.11904761904762, |
|
"grad_norm": 0.24819140136241913, |
|
"learning_rate": 2.7761904761904764e-05, |
|
"loss": 0.0854, |
|
"step": 677 |
|
}, |
|
{ |
|
"epoch": 16.142857142857142, |
|
"grad_norm": 0.22153520584106445, |
|
"learning_rate": 2.7714285714285716e-05, |
|
"loss": 0.0821, |
|
"step": 678 |
|
}, |
|
{ |
|
"epoch": 16.166666666666668, |
|
"grad_norm": 0.2610468566417694, |
|
"learning_rate": 2.766666666666667e-05, |
|
"loss": 0.115, |
|
"step": 679 |
|
}, |
|
{ |
|
"epoch": 16.19047619047619, |
|
"grad_norm": 0.19806088507175446, |
|
"learning_rate": 2.761904761904762e-05, |
|
"loss": 0.0746, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 16.19047619047619, |
|
"eval_dice_score": 0.9234079552722338, |
|
"eval_loss": 0.08446983993053436, |
|
"eval_runtime": 8.6982, |
|
"eval_samples_per_second": 10.232, |
|
"eval_steps_per_second": 1.035, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 16.214285714285715, |
|
"grad_norm": 0.30673784017562866, |
|
"learning_rate": 2.757142857142857e-05, |
|
"loss": 0.0779, |
|
"step": 681 |
|
}, |
|
{ |
|
"epoch": 16.238095238095237, |
|
"grad_norm": 0.2549842596054077, |
|
"learning_rate": 2.7523809523809522e-05, |
|
"loss": 0.0823, |
|
"step": 682 |
|
}, |
|
{ |
|
"epoch": 16.261904761904763, |
|
"grad_norm": 0.24637040495872498, |
|
"learning_rate": 2.7476190476190477e-05, |
|
"loss": 0.0739, |
|
"step": 683 |
|
}, |
|
{ |
|
"epoch": 16.285714285714285, |
|
"grad_norm": 0.2508900761604309, |
|
"learning_rate": 2.7428571428571428e-05, |
|
"loss": 0.0928, |
|
"step": 684 |
|
}, |
|
{ |
|
"epoch": 16.30952380952381, |
|
"grad_norm": 0.2124183028936386, |
|
"learning_rate": 2.738095238095238e-05, |
|
"loss": 0.0795, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 16.333333333333332, |
|
"grad_norm": 0.29679664969444275, |
|
"learning_rate": 2.7333333333333335e-05, |
|
"loss": 0.0809, |
|
"step": 686 |
|
}, |
|
{ |
|
"epoch": 16.357142857142858, |
|
"grad_norm": 0.193775936961174, |
|
"learning_rate": 2.7285714285714286e-05, |
|
"loss": 0.0813, |
|
"step": 687 |
|
}, |
|
{ |
|
"epoch": 16.38095238095238, |
|
"grad_norm": 0.17316065728664398, |
|
"learning_rate": 2.7238095238095238e-05, |
|
"loss": 0.0796, |
|
"step": 688 |
|
}, |
|
{ |
|
"epoch": 16.404761904761905, |
|
"grad_norm": 0.39464396238327026, |
|
"learning_rate": 2.7190476190476193e-05, |
|
"loss": 0.1009, |
|
"step": 689 |
|
}, |
|
{ |
|
"epoch": 16.428571428571427, |
|
"grad_norm": 0.2255675494670868, |
|
"learning_rate": 2.7142857142857144e-05, |
|
"loss": 0.0858, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 16.452380952380953, |
|
"grad_norm": 0.20199432969093323, |
|
"learning_rate": 2.7095238095238096e-05, |
|
"loss": 0.079, |
|
"step": 691 |
|
}, |
|
{ |
|
"epoch": 16.476190476190474, |
|
"grad_norm": 0.3646969199180603, |
|
"learning_rate": 2.7047619047619047e-05, |
|
"loss": 0.1239, |
|
"step": 692 |
|
}, |
|
{ |
|
"epoch": 16.5, |
|
"grad_norm": 0.20324058830738068, |
|
"learning_rate": 2.7000000000000002e-05, |
|
"loss": 0.0983, |
|
"step": 693 |
|
}, |
|
{ |
|
"epoch": 16.523809523809526, |
|
"grad_norm": 0.21907328069210052, |
|
"learning_rate": 2.6952380952380954e-05, |
|
"loss": 0.0836, |
|
"step": 694 |
|
}, |
|
{ |
|
"epoch": 16.547619047619047, |
|
"grad_norm": 0.27519315481185913, |
|
"learning_rate": 2.6904761904761905e-05, |
|
"loss": 0.0873, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 16.571428571428573, |
|
"grad_norm": 0.3098788261413574, |
|
"learning_rate": 2.685714285714286e-05, |
|
"loss": 0.1047, |
|
"step": 696 |
|
}, |
|
{ |
|
"epoch": 16.595238095238095, |
|
"grad_norm": 0.21530640125274658, |
|
"learning_rate": 2.6809523809523812e-05, |
|
"loss": 0.0742, |
|
"step": 697 |
|
}, |
|
{ |
|
"epoch": 16.61904761904762, |
|
"grad_norm": 0.19972284138202667, |
|
"learning_rate": 2.6761904761904763e-05, |
|
"loss": 0.0989, |
|
"step": 698 |
|
}, |
|
{ |
|
"epoch": 16.642857142857142, |
|
"grad_norm": 0.25694721937179565, |
|
"learning_rate": 2.671428571428571e-05, |
|
"loss": 0.0837, |
|
"step": 699 |
|
}, |
|
{ |
|
"epoch": 16.666666666666668, |
|
"grad_norm": 0.4011896252632141, |
|
"learning_rate": 2.6666666666666667e-05, |
|
"loss": 0.1062, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 16.666666666666668, |
|
"eval_dice_score": 0.9230252870594667, |
|
"eval_loss": 0.08297789841890335, |
|
"eval_runtime": 8.732, |
|
"eval_samples_per_second": 10.192, |
|
"eval_steps_per_second": 1.031, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 16.69047619047619, |
|
"grad_norm": 0.26972779631614685, |
|
"learning_rate": 2.6619047619047618e-05, |
|
"loss": 0.0878, |
|
"step": 701 |
|
}, |
|
{ |
|
"epoch": 16.714285714285715, |
|
"grad_norm": 0.3032207190990448, |
|
"learning_rate": 2.657142857142857e-05, |
|
"loss": 0.0933, |
|
"step": 702 |
|
}, |
|
{ |
|
"epoch": 16.738095238095237, |
|
"grad_norm": 0.18521499633789062, |
|
"learning_rate": 2.6523809523809525e-05, |
|
"loss": 0.0846, |
|
"step": 703 |
|
}, |
|
{ |
|
"epoch": 16.761904761904763, |
|
"grad_norm": 0.27679115533828735, |
|
"learning_rate": 2.6476190476190476e-05, |
|
"loss": 0.0716, |
|
"step": 704 |
|
}, |
|
{ |
|
"epoch": 16.785714285714285, |
|
"grad_norm": 0.6110713481903076, |
|
"learning_rate": 2.6428571428571428e-05, |
|
"loss": 0.1121, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 16.80952380952381, |
|
"grad_norm": 0.1876603215932846, |
|
"learning_rate": 2.6380952380952383e-05, |
|
"loss": 0.0844, |
|
"step": 706 |
|
}, |
|
{ |
|
"epoch": 16.833333333333332, |
|
"grad_norm": 0.3138282001018524, |
|
"learning_rate": 2.6333333333333334e-05, |
|
"loss": 0.0904, |
|
"step": 707 |
|
}, |
|
{ |
|
"epoch": 16.857142857142858, |
|
"grad_norm": 0.20735274255275726, |
|
"learning_rate": 2.6285714285714286e-05, |
|
"loss": 0.0968, |
|
"step": 708 |
|
}, |
|
{ |
|
"epoch": 16.88095238095238, |
|
"grad_norm": 0.31236979365348816, |
|
"learning_rate": 2.6238095238095237e-05, |
|
"loss": 0.1186, |
|
"step": 709 |
|
}, |
|
{ |
|
"epoch": 16.904761904761905, |
|
"grad_norm": 0.295156866312027, |
|
"learning_rate": 2.6190476190476192e-05, |
|
"loss": 0.082, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 16.928571428571427, |
|
"grad_norm": 0.24397049844264984, |
|
"learning_rate": 2.6142857142857144e-05, |
|
"loss": 0.0965, |
|
"step": 711 |
|
}, |
|
{ |
|
"epoch": 16.952380952380953, |
|
"grad_norm": 0.2571556568145752, |
|
"learning_rate": 2.6095238095238095e-05, |
|
"loss": 0.0704, |
|
"step": 712 |
|
}, |
|
{ |
|
"epoch": 16.976190476190474, |
|
"grad_norm": 0.24409866333007812, |
|
"learning_rate": 2.604761904761905e-05, |
|
"loss": 0.1001, |
|
"step": 713 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"grad_norm": 0.20111119747161865, |
|
"learning_rate": 2.6000000000000002e-05, |
|
"loss": 0.0824, |
|
"step": 714 |
|
}, |
|
{ |
|
"epoch": 17.023809523809526, |
|
"grad_norm": 0.7431234121322632, |
|
"learning_rate": 2.5952380952380953e-05, |
|
"loss": 0.0929, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 17.047619047619047, |
|
"grad_norm": 0.27233293652534485, |
|
"learning_rate": 2.5904761904761908e-05, |
|
"loss": 0.0917, |
|
"step": 716 |
|
}, |
|
{ |
|
"epoch": 17.071428571428573, |
|
"grad_norm": 0.24907705187797546, |
|
"learning_rate": 2.585714285714286e-05, |
|
"loss": 0.0951, |
|
"step": 717 |
|
}, |
|
{ |
|
"epoch": 17.095238095238095, |
|
"grad_norm": 0.23256130516529083, |
|
"learning_rate": 2.580952380952381e-05, |
|
"loss": 0.0925, |
|
"step": 718 |
|
}, |
|
{ |
|
"epoch": 17.11904761904762, |
|
"grad_norm": 0.5666278004646301, |
|
"learning_rate": 2.576190476190476e-05, |
|
"loss": 0.1042, |
|
"step": 719 |
|
}, |
|
{ |
|
"epoch": 17.142857142857142, |
|
"grad_norm": 0.2410479336977005, |
|
"learning_rate": 2.5714285714285714e-05, |
|
"loss": 0.0666, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 17.142857142857142, |
|
"eval_dice_score": 0.9229571361874431, |
|
"eval_loss": 0.08340851962566376, |
|
"eval_runtime": 8.7189, |
|
"eval_samples_per_second": 10.208, |
|
"eval_steps_per_second": 1.032, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 17.166666666666668, |
|
"grad_norm": 0.20309841632843018, |
|
"learning_rate": 2.5666666666666666e-05, |
|
"loss": 0.067, |
|
"step": 721 |
|
}, |
|
{ |
|
"epoch": 17.19047619047619, |
|
"grad_norm": 0.3129255473613739, |
|
"learning_rate": 2.5619047619047618e-05, |
|
"loss": 0.0894, |
|
"step": 722 |
|
}, |
|
{ |
|
"epoch": 17.214285714285715, |
|
"grad_norm": 0.27786898612976074, |
|
"learning_rate": 2.5571428571428572e-05, |
|
"loss": 0.0839, |
|
"step": 723 |
|
}, |
|
{ |
|
"epoch": 17.238095238095237, |
|
"grad_norm": 0.267407089471817, |
|
"learning_rate": 2.5523809523809524e-05, |
|
"loss": 0.1054, |
|
"step": 724 |
|
}, |
|
{ |
|
"epoch": 17.261904761904763, |
|
"grad_norm": 0.4915502965450287, |
|
"learning_rate": 2.5476190476190476e-05, |
|
"loss": 0.0938, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 17.285714285714285, |
|
"grad_norm": 0.21978172659873962, |
|
"learning_rate": 2.5428571428571427e-05, |
|
"loss": 0.0857, |
|
"step": 726 |
|
}, |
|
{ |
|
"epoch": 17.30952380952381, |
|
"grad_norm": 0.35925644636154175, |
|
"learning_rate": 2.5380952380952382e-05, |
|
"loss": 0.1016, |
|
"step": 727 |
|
}, |
|
{ |
|
"epoch": 17.333333333333332, |
|
"grad_norm": 0.2245437353849411, |
|
"learning_rate": 2.5333333333333334e-05, |
|
"loss": 0.0848, |
|
"step": 728 |
|
}, |
|
{ |
|
"epoch": 17.357142857142858, |
|
"grad_norm": 0.24263395369052887, |
|
"learning_rate": 2.5285714285714285e-05, |
|
"loss": 0.0948, |
|
"step": 729 |
|
}, |
|
{ |
|
"epoch": 17.38095238095238, |
|
"grad_norm": 0.21132177114486694, |
|
"learning_rate": 2.523809523809524e-05, |
|
"loss": 0.0953, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 17.404761904761905, |
|
"grad_norm": 0.6164664030075073, |
|
"learning_rate": 2.519047619047619e-05, |
|
"loss": 0.0985, |
|
"step": 731 |
|
}, |
|
{ |
|
"epoch": 17.428571428571427, |
|
"grad_norm": 0.34183213114738464, |
|
"learning_rate": 2.5142857142857143e-05, |
|
"loss": 0.1059, |
|
"step": 732 |
|
}, |
|
{ |
|
"epoch": 17.452380952380953, |
|
"grad_norm": 0.27018675208091736, |
|
"learning_rate": 2.5095238095238098e-05, |
|
"loss": 0.0955, |
|
"step": 733 |
|
}, |
|
{ |
|
"epoch": 17.476190476190474, |
|
"grad_norm": 0.291178435087204, |
|
"learning_rate": 2.504761904761905e-05, |
|
"loss": 0.0796, |
|
"step": 734 |
|
}, |
|
{ |
|
"epoch": 17.5, |
|
"grad_norm": 0.23403896391391754, |
|
"learning_rate": 2.5e-05, |
|
"loss": 0.0787, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 17.523809523809526, |
|
"grad_norm": 0.45682311058044434, |
|
"learning_rate": 2.4952380952380953e-05, |
|
"loss": 0.0884, |
|
"step": 736 |
|
}, |
|
{ |
|
"epoch": 17.547619047619047, |
|
"grad_norm": 0.23823651671409607, |
|
"learning_rate": 2.4904761904761908e-05, |
|
"loss": 0.0977, |
|
"step": 737 |
|
}, |
|
{ |
|
"epoch": 17.571428571428573, |
|
"grad_norm": 0.22397005558013916, |
|
"learning_rate": 2.485714285714286e-05, |
|
"loss": 0.0775, |
|
"step": 738 |
|
}, |
|
{ |
|
"epoch": 17.595238095238095, |
|
"grad_norm": 0.2246365249156952, |
|
"learning_rate": 2.4809523809523807e-05, |
|
"loss": 0.0685, |
|
"step": 739 |
|
}, |
|
{ |
|
"epoch": 17.61904761904762, |
|
"grad_norm": 0.2863618731498718, |
|
"learning_rate": 2.4761904761904762e-05, |
|
"loss": 0.0904, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 17.61904761904762, |
|
"eval_dice_score": 0.9211544390867972, |
|
"eval_loss": 0.0836789682507515, |
|
"eval_runtime": 8.7029, |
|
"eval_samples_per_second": 10.226, |
|
"eval_steps_per_second": 1.034, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 17.642857142857142, |
|
"grad_norm": 0.39199358224868774, |
|
"learning_rate": 2.4714285714285714e-05, |
|
"loss": 0.1074, |
|
"step": 741 |
|
}, |
|
{ |
|
"epoch": 17.666666666666668, |
|
"grad_norm": 0.29316845536231995, |
|
"learning_rate": 2.4666666666666665e-05, |
|
"loss": 0.0857, |
|
"step": 742 |
|
}, |
|
{ |
|
"epoch": 17.69047619047619, |
|
"grad_norm": 0.2797645926475525, |
|
"learning_rate": 2.461904761904762e-05, |
|
"loss": 0.0803, |
|
"step": 743 |
|
}, |
|
{ |
|
"epoch": 17.714285714285715, |
|
"grad_norm": 0.2353767305612564, |
|
"learning_rate": 2.4571428571428572e-05, |
|
"loss": 0.0861, |
|
"step": 744 |
|
}, |
|
{ |
|
"epoch": 17.738095238095237, |
|
"grad_norm": 0.24464334547519684, |
|
"learning_rate": 2.4523809523809523e-05, |
|
"loss": 0.0785, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 17.761904761904763, |
|
"grad_norm": 0.3035442531108856, |
|
"learning_rate": 2.4476190476190475e-05, |
|
"loss": 0.0896, |
|
"step": 746 |
|
}, |
|
{ |
|
"epoch": 17.785714285714285, |
|
"grad_norm": 0.20400097966194153, |
|
"learning_rate": 2.442857142857143e-05, |
|
"loss": 0.0898, |
|
"step": 747 |
|
}, |
|
{ |
|
"epoch": 17.80952380952381, |
|
"grad_norm": 0.23570357263088226, |
|
"learning_rate": 2.438095238095238e-05, |
|
"loss": 0.0836, |
|
"step": 748 |
|
}, |
|
{ |
|
"epoch": 17.833333333333332, |
|
"grad_norm": 0.321524053812027, |
|
"learning_rate": 2.4333333333333333e-05, |
|
"loss": 0.0744, |
|
"step": 749 |
|
}, |
|
{ |
|
"epoch": 17.857142857142858, |
|
"grad_norm": 0.22331471741199493, |
|
"learning_rate": 2.4285714285714288e-05, |
|
"loss": 0.0872, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 17.88095238095238, |
|
"grad_norm": 0.31584519147872925, |
|
"learning_rate": 2.423809523809524e-05, |
|
"loss": 0.0746, |
|
"step": 751 |
|
}, |
|
{ |
|
"epoch": 17.904761904761905, |
|
"grad_norm": 0.32943403720855713, |
|
"learning_rate": 2.419047619047619e-05, |
|
"loss": 0.0896, |
|
"step": 752 |
|
}, |
|
{ |
|
"epoch": 17.928571428571427, |
|
"grad_norm": 0.21786193549633026, |
|
"learning_rate": 2.4142857142857146e-05, |
|
"loss": 0.0828, |
|
"step": 753 |
|
}, |
|
{ |
|
"epoch": 17.952380952380953, |
|
"grad_norm": 0.17888040840625763, |
|
"learning_rate": 2.4095238095238098e-05, |
|
"loss": 0.0775, |
|
"step": 754 |
|
}, |
|
{ |
|
"epoch": 17.976190476190474, |
|
"grad_norm": 0.250286728143692, |
|
"learning_rate": 2.404761904761905e-05, |
|
"loss": 0.0844, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"grad_norm": 0.3801717162132263, |
|
"learning_rate": 2.4e-05, |
|
"loss": 0.0883, |
|
"step": 756 |
|
}, |
|
{ |
|
"epoch": 18.023809523809526, |
|
"grad_norm": 0.2086687684059143, |
|
"learning_rate": 2.3952380952380956e-05, |
|
"loss": 0.0784, |
|
"step": 757 |
|
}, |
|
{ |
|
"epoch": 18.047619047619047, |
|
"grad_norm": 0.23379236459732056, |
|
"learning_rate": 2.3904761904761904e-05, |
|
"loss": 0.0947, |
|
"step": 758 |
|
}, |
|
{ |
|
"epoch": 18.071428571428573, |
|
"grad_norm": 0.2616595923900604, |
|
"learning_rate": 2.3857142857142855e-05, |
|
"loss": 0.0869, |
|
"step": 759 |
|
}, |
|
{ |
|
"epoch": 18.095238095238095, |
|
"grad_norm": 0.18408074975013733, |
|
"learning_rate": 2.380952380952381e-05, |
|
"loss": 0.0791, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 18.095238095238095, |
|
"eval_dice_score": 0.9236365296024307, |
|
"eval_loss": 0.0821303203701973, |
|
"eval_runtime": 8.7731, |
|
"eval_samples_per_second": 10.145, |
|
"eval_steps_per_second": 1.026, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 18.11904761904762, |
|
"grad_norm": 0.17643745243549347, |
|
"learning_rate": 2.3761904761904762e-05, |
|
"loss": 0.0809, |
|
"step": 761 |
|
}, |
|
{ |
|
"epoch": 18.142857142857142, |
|
"grad_norm": 0.28116345405578613, |
|
"learning_rate": 2.3714285714285713e-05, |
|
"loss": 0.085, |
|
"step": 762 |
|
}, |
|
{ |
|
"epoch": 18.166666666666668, |
|
"grad_norm": 0.18162474036216736, |
|
"learning_rate": 2.3666666666666665e-05, |
|
"loss": 0.078, |
|
"step": 763 |
|
}, |
|
{ |
|
"epoch": 18.19047619047619, |
|
"grad_norm": 0.27260643243789673, |
|
"learning_rate": 2.361904761904762e-05, |
|
"loss": 0.0914, |
|
"step": 764 |
|
}, |
|
{ |
|
"epoch": 18.214285714285715, |
|
"grad_norm": 0.26358166337013245, |
|
"learning_rate": 2.357142857142857e-05, |
|
"loss": 0.0857, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 18.238095238095237, |
|
"grad_norm": 0.32413822412490845, |
|
"learning_rate": 2.3523809523809523e-05, |
|
"loss": 0.0894, |
|
"step": 766 |
|
}, |
|
{ |
|
"epoch": 18.261904761904763, |
|
"grad_norm": 0.23978236317634583, |
|
"learning_rate": 2.3476190476190478e-05, |
|
"loss": 0.091, |
|
"step": 767 |
|
}, |
|
{ |
|
"epoch": 18.285714285714285, |
|
"grad_norm": 0.3409174978733063, |
|
"learning_rate": 2.342857142857143e-05, |
|
"loss": 0.1105, |
|
"step": 768 |
|
}, |
|
{ |
|
"epoch": 18.30952380952381, |
|
"grad_norm": 0.1719333976507187, |
|
"learning_rate": 2.338095238095238e-05, |
|
"loss": 0.0799, |
|
"step": 769 |
|
}, |
|
{ |
|
"epoch": 18.333333333333332, |
|
"grad_norm": 0.2759392559528351, |
|
"learning_rate": 2.3333333333333336e-05, |
|
"loss": 0.0943, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 18.357142857142858, |
|
"grad_norm": 0.2284536361694336, |
|
"learning_rate": 2.3285714285714287e-05, |
|
"loss": 0.0899, |
|
"step": 771 |
|
}, |
|
{ |
|
"epoch": 18.38095238095238, |
|
"grad_norm": 0.2436296045780182, |
|
"learning_rate": 2.323809523809524e-05, |
|
"loss": 0.0662, |
|
"step": 772 |
|
}, |
|
{ |
|
"epoch": 18.404761904761905, |
|
"grad_norm": 0.16819937527179718, |
|
"learning_rate": 2.319047619047619e-05, |
|
"loss": 0.0865, |
|
"step": 773 |
|
}, |
|
{ |
|
"epoch": 18.428571428571427, |
|
"grad_norm": 0.22437210381031036, |
|
"learning_rate": 2.3142857142857145e-05, |
|
"loss": 0.0849, |
|
"step": 774 |
|
}, |
|
{ |
|
"epoch": 18.452380952380953, |
|
"grad_norm": 0.28795716166496277, |
|
"learning_rate": 2.3095238095238097e-05, |
|
"loss": 0.0997, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 18.476190476190474, |
|
"grad_norm": 0.40768590569496155, |
|
"learning_rate": 2.304761904761905e-05, |
|
"loss": 0.1003, |
|
"step": 776 |
|
}, |
|
{ |
|
"epoch": 18.5, |
|
"grad_norm": 0.2193804681301117, |
|
"learning_rate": 2.3000000000000003e-05, |
|
"loss": 0.0896, |
|
"step": 777 |
|
}, |
|
{ |
|
"epoch": 18.523809523809526, |
|
"grad_norm": 0.29064154624938965, |
|
"learning_rate": 2.295238095238095e-05, |
|
"loss": 0.0778, |
|
"step": 778 |
|
}, |
|
{ |
|
"epoch": 18.547619047619047, |
|
"grad_norm": 0.39261043071746826, |
|
"learning_rate": 2.2904761904761903e-05, |
|
"loss": 0.0924, |
|
"step": 779 |
|
}, |
|
{ |
|
"epoch": 18.571428571428573, |
|
"grad_norm": 0.37924641370773315, |
|
"learning_rate": 2.2857142857142858e-05, |
|
"loss": 0.1048, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 18.571428571428573, |
|
"eval_dice_score": 0.9232294350220722, |
|
"eval_loss": 0.08147627115249634, |
|
"eval_runtime": 8.7455, |
|
"eval_samples_per_second": 10.177, |
|
"eval_steps_per_second": 1.029, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 18.595238095238095, |
|
"grad_norm": 0.24612796306610107, |
|
"learning_rate": 2.280952380952381e-05, |
|
"loss": 0.0774, |
|
"step": 781 |
|
}, |
|
{ |
|
"epoch": 18.61904761904762, |
|
"grad_norm": 0.18419839441776276, |
|
"learning_rate": 2.276190476190476e-05, |
|
"loss": 0.0829, |
|
"step": 782 |
|
}, |
|
{ |
|
"epoch": 18.642857142857142, |
|
"grad_norm": 0.19330959022045135, |
|
"learning_rate": 2.2714285714285713e-05, |
|
"loss": 0.0729, |
|
"step": 783 |
|
}, |
|
{ |
|
"epoch": 18.666666666666668, |
|
"grad_norm": 0.21217748522758484, |
|
"learning_rate": 2.2666666666666668e-05, |
|
"loss": 0.0729, |
|
"step": 784 |
|
}, |
|
{ |
|
"epoch": 18.69047619047619, |
|
"grad_norm": 0.22022300958633423, |
|
"learning_rate": 2.261904761904762e-05, |
|
"loss": 0.075, |
|
"step": 785 |
|
}, |
|
{ |
|
"epoch": 18.714285714285715, |
|
"grad_norm": 0.2307768315076828, |
|
"learning_rate": 2.257142857142857e-05, |
|
"loss": 0.0995, |
|
"step": 786 |
|
}, |
|
{ |
|
"epoch": 18.738095238095237, |
|
"grad_norm": 0.499103307723999, |
|
"learning_rate": 2.2523809523809526e-05, |
|
"loss": 0.0927, |
|
"step": 787 |
|
}, |
|
{ |
|
"epoch": 18.761904761904763, |
|
"grad_norm": 0.1646272838115692, |
|
"learning_rate": 2.2476190476190477e-05, |
|
"loss": 0.0728, |
|
"step": 788 |
|
}, |
|
{ |
|
"epoch": 18.785714285714285, |
|
"grad_norm": 0.19184449315071106, |
|
"learning_rate": 2.242857142857143e-05, |
|
"loss": 0.0768, |
|
"step": 789 |
|
}, |
|
{ |
|
"epoch": 18.80952380952381, |
|
"grad_norm": 0.3876766562461853, |
|
"learning_rate": 2.238095238095238e-05, |
|
"loss": 0.1064, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 18.833333333333332, |
|
"grad_norm": 0.18872225284576416, |
|
"learning_rate": 2.2333333333333335e-05, |
|
"loss": 0.0783, |
|
"step": 791 |
|
}, |
|
{ |
|
"epoch": 18.857142857142858, |
|
"grad_norm": 0.4279574453830719, |
|
"learning_rate": 2.2285714285714287e-05, |
|
"loss": 0.0971, |
|
"step": 792 |
|
}, |
|
{ |
|
"epoch": 18.88095238095238, |
|
"grad_norm": 0.1798621416091919, |
|
"learning_rate": 2.223809523809524e-05, |
|
"loss": 0.0862, |
|
"step": 793 |
|
}, |
|
{ |
|
"epoch": 18.904761904761905, |
|
"grad_norm": 0.4626399874687195, |
|
"learning_rate": 2.2190476190476193e-05, |
|
"loss": 0.0922, |
|
"step": 794 |
|
}, |
|
{ |
|
"epoch": 18.928571428571427, |
|
"grad_norm": 0.2156091034412384, |
|
"learning_rate": 2.2142857142857145e-05, |
|
"loss": 0.0826, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 18.952380952380953, |
|
"grad_norm": 0.30277004837989807, |
|
"learning_rate": 2.2095238095238096e-05, |
|
"loss": 0.0881, |
|
"step": 796 |
|
}, |
|
{ |
|
"epoch": 18.976190476190474, |
|
"grad_norm": 0.20252549648284912, |
|
"learning_rate": 2.204761904761905e-05, |
|
"loss": 0.0814, |
|
"step": 797 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"grad_norm": 0.3133484423160553, |
|
"learning_rate": 2.2e-05, |
|
"loss": 0.091, |
|
"step": 798 |
|
}, |
|
{ |
|
"epoch": 19.023809523809526, |
|
"grad_norm": 0.2910543978214264, |
|
"learning_rate": 2.195238095238095e-05, |
|
"loss": 0.0761, |
|
"step": 799 |
|
}, |
|
{ |
|
"epoch": 19.047619047619047, |
|
"grad_norm": 0.19995278120040894, |
|
"learning_rate": 2.1904761904761903e-05, |
|
"loss": 0.0935, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 19.047619047619047, |
|
"eval_dice_score": 0.9242091310096217, |
|
"eval_loss": 0.08286911994218826, |
|
"eval_runtime": 9.0935, |
|
"eval_samples_per_second": 9.787, |
|
"eval_steps_per_second": 0.99, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 19.071428571428573, |
|
"grad_norm": 0.29460981488227844, |
|
"learning_rate": 2.1857142857142857e-05, |
|
"loss": 0.0838, |
|
"step": 801 |
|
}, |
|
{ |
|
"epoch": 19.095238095238095, |
|
"grad_norm": 0.18889278173446655, |
|
"learning_rate": 2.180952380952381e-05, |
|
"loss": 0.1012, |
|
"step": 802 |
|
}, |
|
{ |
|
"epoch": 19.11904761904762, |
|
"grad_norm": 0.2679443061351776, |
|
"learning_rate": 2.176190476190476e-05, |
|
"loss": 0.0893, |
|
"step": 803 |
|
}, |
|
{ |
|
"epoch": 19.142857142857142, |
|
"grad_norm": 0.2455090433359146, |
|
"learning_rate": 2.1714285714285715e-05, |
|
"loss": 0.0715, |
|
"step": 804 |
|
}, |
|
{ |
|
"epoch": 19.166666666666668, |
|
"grad_norm": 0.17029725015163422, |
|
"learning_rate": 2.1666666666666667e-05, |
|
"loss": 0.0729, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 19.19047619047619, |
|
"grad_norm": 0.3500533699989319, |
|
"learning_rate": 2.161904761904762e-05, |
|
"loss": 0.0991, |
|
"step": 806 |
|
}, |
|
{ |
|
"epoch": 19.214285714285715, |
|
"grad_norm": 0.17729905247688293, |
|
"learning_rate": 2.1571428571428574e-05, |
|
"loss": 0.0822, |
|
"step": 807 |
|
}, |
|
{ |
|
"epoch": 19.238095238095237, |
|
"grad_norm": 0.24052225053310394, |
|
"learning_rate": 2.1523809523809525e-05, |
|
"loss": 0.0736, |
|
"step": 808 |
|
}, |
|
{ |
|
"epoch": 19.261904761904763, |
|
"grad_norm": 0.24907103180885315, |
|
"learning_rate": 2.1476190476190477e-05, |
|
"loss": 0.0874, |
|
"step": 809 |
|
}, |
|
{ |
|
"epoch": 19.285714285714285, |
|
"grad_norm": 0.2517390251159668, |
|
"learning_rate": 2.1428571428571428e-05, |
|
"loss": 0.087, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 19.30952380952381, |
|
"grad_norm": 0.21364130079746246, |
|
"learning_rate": 2.1380952380952383e-05, |
|
"loss": 0.0926, |
|
"step": 811 |
|
}, |
|
{ |
|
"epoch": 19.333333333333332, |
|
"grad_norm": 0.16167882084846497, |
|
"learning_rate": 2.1333333333333335e-05, |
|
"loss": 0.087, |
|
"step": 812 |
|
}, |
|
{ |
|
"epoch": 19.357142857142858, |
|
"grad_norm": 0.21866513788700104, |
|
"learning_rate": 2.1285714285714286e-05, |
|
"loss": 0.0732, |
|
"step": 813 |
|
}, |
|
{ |
|
"epoch": 19.38095238095238, |
|
"grad_norm": 0.30814534425735474, |
|
"learning_rate": 2.123809523809524e-05, |
|
"loss": 0.0951, |
|
"step": 814 |
|
}, |
|
{ |
|
"epoch": 19.404761904761905, |
|
"grad_norm": 0.20811501145362854, |
|
"learning_rate": 2.1190476190476193e-05, |
|
"loss": 0.0677, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 19.428571428571427, |
|
"grad_norm": 0.3216896057128906, |
|
"learning_rate": 2.1142857142857144e-05, |
|
"loss": 0.0872, |
|
"step": 816 |
|
}, |
|
{ |
|
"epoch": 19.452380952380953, |
|
"grad_norm": 0.3017369210720062, |
|
"learning_rate": 2.1095238095238096e-05, |
|
"loss": 0.0696, |
|
"step": 817 |
|
}, |
|
{ |
|
"epoch": 19.476190476190474, |
|
"grad_norm": 0.22521443665027618, |
|
"learning_rate": 2.1047619047619047e-05, |
|
"loss": 0.1094, |
|
"step": 818 |
|
}, |
|
{ |
|
"epoch": 19.5, |
|
"grad_norm": 0.1566617488861084, |
|
"learning_rate": 2.1e-05, |
|
"loss": 0.0851, |
|
"step": 819 |
|
}, |
|
{ |
|
"epoch": 19.523809523809526, |
|
"grad_norm": 0.2676796019077301, |
|
"learning_rate": 2.095238095238095e-05, |
|
"loss": 0.0745, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 19.523809523809526, |
|
"eval_dice_score": 0.9234152837764387, |
|
"eval_loss": 0.08154100179672241, |
|
"eval_runtime": 8.8155, |
|
"eval_samples_per_second": 10.096, |
|
"eval_steps_per_second": 1.021, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 19.547619047619047, |
|
"grad_norm": 0.2537257969379425, |
|
"learning_rate": 2.0904761904761905e-05, |
|
"loss": 0.0889, |
|
"step": 821 |
|
}, |
|
{ |
|
"epoch": 19.571428571428573, |
|
"grad_norm": 0.16671201586723328, |
|
"learning_rate": 2.0857142857142857e-05, |
|
"loss": 0.084, |
|
"step": 822 |
|
}, |
|
{ |
|
"epoch": 19.595238095238095, |
|
"grad_norm": 0.21541264653205872, |
|
"learning_rate": 2.080952380952381e-05, |
|
"loss": 0.0993, |
|
"step": 823 |
|
}, |
|
{ |
|
"epoch": 19.61904761904762, |
|
"grad_norm": 0.20101720094680786, |
|
"learning_rate": 2.0761904761904763e-05, |
|
"loss": 0.0906, |
|
"step": 824 |
|
}, |
|
{ |
|
"epoch": 19.642857142857142, |
|
"grad_norm": 0.155084490776062, |
|
"learning_rate": 2.0714285714285715e-05, |
|
"loss": 0.0703, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 19.666666666666668, |
|
"grad_norm": 0.19437827169895172, |
|
"learning_rate": 2.0666666666666666e-05, |
|
"loss": 0.0827, |
|
"step": 826 |
|
}, |
|
{ |
|
"epoch": 19.69047619047619, |
|
"grad_norm": 0.2180471420288086, |
|
"learning_rate": 2.0619047619047618e-05, |
|
"loss": 0.088, |
|
"step": 827 |
|
}, |
|
{ |
|
"epoch": 19.714285714285715, |
|
"grad_norm": 0.30691877007484436, |
|
"learning_rate": 2.0571428571428573e-05, |
|
"loss": 0.1097, |
|
"step": 828 |
|
}, |
|
{ |
|
"epoch": 19.738095238095237, |
|
"grad_norm": 0.16602082550525665, |
|
"learning_rate": 2.0523809523809524e-05, |
|
"loss": 0.0706, |
|
"step": 829 |
|
}, |
|
{ |
|
"epoch": 19.761904761904763, |
|
"grad_norm": 0.19758841395378113, |
|
"learning_rate": 2.0476190476190476e-05, |
|
"loss": 0.1042, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 19.785714285714285, |
|
"grad_norm": 0.2860798239707947, |
|
"learning_rate": 2.042857142857143e-05, |
|
"loss": 0.0872, |
|
"step": 831 |
|
}, |
|
{ |
|
"epoch": 19.80952380952381, |
|
"grad_norm": 0.175958052277565, |
|
"learning_rate": 2.0380952380952382e-05, |
|
"loss": 0.0812, |
|
"step": 832 |
|
}, |
|
{ |
|
"epoch": 19.833333333333332, |
|
"grad_norm": 0.2437559813261032, |
|
"learning_rate": 2.0333333333333334e-05, |
|
"loss": 0.0811, |
|
"step": 833 |
|
}, |
|
{ |
|
"epoch": 19.857142857142858, |
|
"grad_norm": 0.16146957874298096, |
|
"learning_rate": 2.028571428571429e-05, |
|
"loss": 0.0726, |
|
"step": 834 |
|
}, |
|
{ |
|
"epoch": 19.88095238095238, |
|
"grad_norm": 0.18311820924282074, |
|
"learning_rate": 2.023809523809524e-05, |
|
"loss": 0.0936, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 19.904761904761905, |
|
"grad_norm": 0.23156321048736572, |
|
"learning_rate": 2.0190476190476192e-05, |
|
"loss": 0.0959, |
|
"step": 836 |
|
}, |
|
{ |
|
"epoch": 19.928571428571427, |
|
"grad_norm": 0.6132747530937195, |
|
"learning_rate": 2.014285714285714e-05, |
|
"loss": 0.1083, |
|
"step": 837 |
|
}, |
|
{ |
|
"epoch": 19.952380952380953, |
|
"grad_norm": 0.2138746976852417, |
|
"learning_rate": 2.0095238095238095e-05, |
|
"loss": 0.0686, |
|
"step": 838 |
|
}, |
|
{ |
|
"epoch": 19.976190476190474, |
|
"grad_norm": 0.1906508058309555, |
|
"learning_rate": 2.0047619047619047e-05, |
|
"loss": 0.0819, |
|
"step": 839 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"grad_norm": 0.19612105190753937, |
|
"learning_rate": 1.9999999999999998e-05, |
|
"loss": 0.0811, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_dice_score": 0.9237751835199779, |
|
"eval_loss": 0.08120287954807281, |
|
"eval_runtime": 8.9191, |
|
"eval_samples_per_second": 9.979, |
|
"eval_steps_per_second": 1.009, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 20.023809523809526, |
|
"grad_norm": 0.20686964690685272, |
|
"learning_rate": 1.9952380952380953e-05, |
|
"loss": 0.0828, |
|
"step": 841 |
|
}, |
|
{ |
|
"epoch": 20.047619047619047, |
|
"grad_norm": 0.19367481768131256, |
|
"learning_rate": 1.9904761904761905e-05, |
|
"loss": 0.077, |
|
"step": 842 |
|
}, |
|
{ |
|
"epoch": 20.071428571428573, |
|
"grad_norm": 0.24994997680187225, |
|
"learning_rate": 1.9857142857142856e-05, |
|
"loss": 0.0735, |
|
"step": 843 |
|
}, |
|
{ |
|
"epoch": 20.095238095238095, |
|
"grad_norm": 0.22139909863471985, |
|
"learning_rate": 1.980952380952381e-05, |
|
"loss": 0.0784, |
|
"step": 844 |
|
}, |
|
{ |
|
"epoch": 20.11904761904762, |
|
"grad_norm": 0.30801865458488464, |
|
"learning_rate": 1.9761904761904763e-05, |
|
"loss": 0.103, |
|
"step": 845 |
|
}, |
|
{ |
|
"epoch": 20.142857142857142, |
|
"grad_norm": 0.33742642402648926, |
|
"learning_rate": 1.9714285714285714e-05, |
|
"loss": 0.1045, |
|
"step": 846 |
|
}, |
|
{ |
|
"epoch": 20.166666666666668, |
|
"grad_norm": 0.22293123602867126, |
|
"learning_rate": 1.9666666666666666e-05, |
|
"loss": 0.0856, |
|
"step": 847 |
|
}, |
|
{ |
|
"epoch": 20.19047619047619, |
|
"grad_norm": 0.3034045398235321, |
|
"learning_rate": 1.961904761904762e-05, |
|
"loss": 0.0824, |
|
"step": 848 |
|
}, |
|
{ |
|
"epoch": 20.214285714285715, |
|
"grad_norm": 0.20768654346466064, |
|
"learning_rate": 1.9571428571428572e-05, |
|
"loss": 0.0766, |
|
"step": 849 |
|
}, |
|
{ |
|
"epoch": 20.238095238095237, |
|
"grad_norm": 0.22169268131256104, |
|
"learning_rate": 1.9523809523809524e-05, |
|
"loss": 0.0778, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 20.261904761904763, |
|
"grad_norm": 0.1683131903409958, |
|
"learning_rate": 1.947619047619048e-05, |
|
"loss": 0.0758, |
|
"step": 851 |
|
}, |
|
{ |
|
"epoch": 20.285714285714285, |
|
"grad_norm": 0.3005770742893219, |
|
"learning_rate": 1.942857142857143e-05, |
|
"loss": 0.1013, |
|
"step": 852 |
|
}, |
|
{ |
|
"epoch": 20.30952380952381, |
|
"grad_norm": 0.48266512155532837, |
|
"learning_rate": 1.9380952380952382e-05, |
|
"loss": 0.0957, |
|
"step": 853 |
|
}, |
|
{ |
|
"epoch": 20.333333333333332, |
|
"grad_norm": 0.37430715560913086, |
|
"learning_rate": 1.9333333333333333e-05, |
|
"loss": 0.0702, |
|
"step": 854 |
|
}, |
|
{ |
|
"epoch": 20.357142857142858, |
|
"grad_norm": 0.3011479675769806, |
|
"learning_rate": 1.928571428571429e-05, |
|
"loss": 0.0862, |
|
"step": 855 |
|
}, |
|
{ |
|
"epoch": 20.38095238095238, |
|
"grad_norm": 0.3303626477718353, |
|
"learning_rate": 1.923809523809524e-05, |
|
"loss": 0.081, |
|
"step": 856 |
|
}, |
|
{ |
|
"epoch": 20.404761904761905, |
|
"grad_norm": 0.1781713366508484, |
|
"learning_rate": 1.9190476190476188e-05, |
|
"loss": 0.0755, |
|
"step": 857 |
|
}, |
|
{ |
|
"epoch": 20.428571428571427, |
|
"grad_norm": 0.45133450627326965, |
|
"learning_rate": 1.9142857142857143e-05, |
|
"loss": 0.1125, |
|
"step": 858 |
|
}, |
|
{ |
|
"epoch": 20.452380952380953, |
|
"grad_norm": 0.3273387551307678, |
|
"learning_rate": 1.9095238095238095e-05, |
|
"loss": 0.1023, |
|
"step": 859 |
|
}, |
|
{ |
|
"epoch": 20.476190476190474, |
|
"grad_norm": 0.2446010559797287, |
|
"learning_rate": 1.9047619047619046e-05, |
|
"loss": 0.0905, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 20.476190476190474, |
|
"eval_dice_score": 0.9241981800769812, |
|
"eval_loss": 0.08037485182285309, |
|
"eval_runtime": 8.7085, |
|
"eval_samples_per_second": 10.22, |
|
"eval_steps_per_second": 1.033, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 20.5, |
|
"grad_norm": 0.3432914912700653, |
|
"learning_rate": 1.9e-05, |
|
"loss": 0.0834, |
|
"step": 861 |
|
}, |
|
{ |
|
"epoch": 20.523809523809526, |
|
"grad_norm": 0.3280867040157318, |
|
"learning_rate": 1.8952380952380953e-05, |
|
"loss": 0.091, |
|
"step": 862 |
|
}, |
|
{ |
|
"epoch": 20.547619047619047, |
|
"grad_norm": 0.21562229096889496, |
|
"learning_rate": 1.8904761904761904e-05, |
|
"loss": 0.0803, |
|
"step": 863 |
|
}, |
|
{ |
|
"epoch": 20.571428571428573, |
|
"grad_norm": 0.22910885512828827, |
|
"learning_rate": 1.8857142857142856e-05, |
|
"loss": 0.0763, |
|
"step": 864 |
|
}, |
|
{ |
|
"epoch": 20.595238095238095, |
|
"grad_norm": 0.20336787402629852, |
|
"learning_rate": 1.880952380952381e-05, |
|
"loss": 0.0839, |
|
"step": 865 |
|
}, |
|
{ |
|
"epoch": 20.61904761904762, |
|
"grad_norm": 0.22064198553562164, |
|
"learning_rate": 1.8761904761904762e-05, |
|
"loss": 0.081, |
|
"step": 866 |
|
}, |
|
{ |
|
"epoch": 20.642857142857142, |
|
"grad_norm": 0.2525925040245056, |
|
"learning_rate": 1.8714285714285714e-05, |
|
"loss": 0.0853, |
|
"step": 867 |
|
}, |
|
{ |
|
"epoch": 20.666666666666668, |
|
"grad_norm": 0.21353179216384888, |
|
"learning_rate": 1.866666666666667e-05, |
|
"loss": 0.0862, |
|
"step": 868 |
|
}, |
|
{ |
|
"epoch": 20.69047619047619, |
|
"grad_norm": 0.17838771641254425, |
|
"learning_rate": 1.861904761904762e-05, |
|
"loss": 0.0809, |
|
"step": 869 |
|
}, |
|
{ |
|
"epoch": 20.714285714285715, |
|
"grad_norm": 0.2589264512062073, |
|
"learning_rate": 1.8571428571428572e-05, |
|
"loss": 0.1112, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 20.738095238095237, |
|
"grad_norm": 0.2731986939907074, |
|
"learning_rate": 1.8523809523809527e-05, |
|
"loss": 0.0861, |
|
"step": 871 |
|
}, |
|
{ |
|
"epoch": 20.761904761904763, |
|
"grad_norm": 0.18752317130565643, |
|
"learning_rate": 1.8476190476190478e-05, |
|
"loss": 0.068, |
|
"step": 872 |
|
}, |
|
{ |
|
"epoch": 20.785714285714285, |
|
"grad_norm": 0.18171530961990356, |
|
"learning_rate": 1.842857142857143e-05, |
|
"loss": 0.0722, |
|
"step": 873 |
|
}, |
|
{ |
|
"epoch": 20.80952380952381, |
|
"grad_norm": 0.20566533505916595, |
|
"learning_rate": 1.838095238095238e-05, |
|
"loss": 0.0669, |
|
"step": 874 |
|
}, |
|
{ |
|
"epoch": 20.833333333333332, |
|
"grad_norm": 0.2387693226337433, |
|
"learning_rate": 1.8333333333333336e-05, |
|
"loss": 0.0868, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 20.857142857142858, |
|
"grad_norm": 0.2408524453639984, |
|
"learning_rate": 1.8285714285714288e-05, |
|
"loss": 0.0716, |
|
"step": 876 |
|
}, |
|
{ |
|
"epoch": 20.88095238095238, |
|
"grad_norm": 0.35495293140411377, |
|
"learning_rate": 1.8238095238095236e-05, |
|
"loss": 0.0902, |
|
"step": 877 |
|
}, |
|
{ |
|
"epoch": 20.904761904761905, |
|
"grad_norm": 0.19206169247627258, |
|
"learning_rate": 1.819047619047619e-05, |
|
"loss": 0.0771, |
|
"step": 878 |
|
}, |
|
{ |
|
"epoch": 20.928571428571427, |
|
"grad_norm": 0.22178664803504944, |
|
"learning_rate": 1.8142857142857142e-05, |
|
"loss": 0.1119, |
|
"step": 879 |
|
}, |
|
{ |
|
"epoch": 20.952380952380953, |
|
"grad_norm": 0.1991724669933319, |
|
"learning_rate": 1.8095238095238094e-05, |
|
"loss": 0.069, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 20.952380952380953, |
|
"eval_dice_score": 0.9220529684642296, |
|
"eval_loss": 0.08112800121307373, |
|
"eval_runtime": 8.7757, |
|
"eval_samples_per_second": 10.142, |
|
"eval_steps_per_second": 1.026, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 20.976190476190474, |
|
"grad_norm": 0.29708757996559143, |
|
"learning_rate": 1.804761904761905e-05, |
|
"loss": 0.0901, |
|
"step": 881 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"grad_norm": 0.28050467371940613, |
|
"learning_rate": 1.8e-05, |
|
"loss": 0.1043, |
|
"step": 882 |
|
}, |
|
{ |
|
"epoch": 21.023809523809526, |
|
"grad_norm": 0.25332918763160706, |
|
"learning_rate": 1.7952380952380952e-05, |
|
"loss": 0.0872, |
|
"step": 883 |
|
}, |
|
{ |
|
"epoch": 21.047619047619047, |
|
"grad_norm": 0.21389389038085938, |
|
"learning_rate": 1.7904761904761904e-05, |
|
"loss": 0.1038, |
|
"step": 884 |
|
}, |
|
{ |
|
"epoch": 21.071428571428573, |
|
"grad_norm": 0.2038821429014206, |
|
"learning_rate": 1.785714285714286e-05, |
|
"loss": 0.075, |
|
"step": 885 |
|
}, |
|
{ |
|
"epoch": 21.095238095238095, |
|
"grad_norm": 0.19971756637096405, |
|
"learning_rate": 1.780952380952381e-05, |
|
"loss": 0.0827, |
|
"step": 886 |
|
}, |
|
{ |
|
"epoch": 21.11904761904762, |
|
"grad_norm": 0.2837202548980713, |
|
"learning_rate": 1.776190476190476e-05, |
|
"loss": 0.0781, |
|
"step": 887 |
|
}, |
|
{ |
|
"epoch": 21.142857142857142, |
|
"grad_norm": 0.24689999222755432, |
|
"learning_rate": 1.7714285714285717e-05, |
|
"loss": 0.0746, |
|
"step": 888 |
|
}, |
|
{ |
|
"epoch": 21.166666666666668, |
|
"grad_norm": 0.19299085438251495, |
|
"learning_rate": 1.7666666666666668e-05, |
|
"loss": 0.0936, |
|
"step": 889 |
|
}, |
|
{ |
|
"epoch": 21.19047619047619, |
|
"grad_norm": 0.18479064106941223, |
|
"learning_rate": 1.761904761904762e-05, |
|
"loss": 0.0919, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 21.214285714285715, |
|
"grad_norm": 0.24078267812728882, |
|
"learning_rate": 1.757142857142857e-05, |
|
"loss": 0.0836, |
|
"step": 891 |
|
}, |
|
{ |
|
"epoch": 21.238095238095237, |
|
"grad_norm": 0.33140161633491516, |
|
"learning_rate": 1.7523809523809526e-05, |
|
"loss": 0.0873, |
|
"step": 892 |
|
}, |
|
{ |
|
"epoch": 21.261904761904763, |
|
"grad_norm": 0.22419366240501404, |
|
"learning_rate": 1.7476190476190478e-05, |
|
"loss": 0.0821, |
|
"step": 893 |
|
}, |
|
{ |
|
"epoch": 21.285714285714285, |
|
"grad_norm": 0.16673584282398224, |
|
"learning_rate": 1.742857142857143e-05, |
|
"loss": 0.0778, |
|
"step": 894 |
|
}, |
|
{ |
|
"epoch": 21.30952380952381, |
|
"grad_norm": 0.19760103523731232, |
|
"learning_rate": 1.7380952380952384e-05, |
|
"loss": 0.0738, |
|
"step": 895 |
|
}, |
|
{ |
|
"epoch": 21.333333333333332, |
|
"grad_norm": 0.22858861088752747, |
|
"learning_rate": 1.7333333333333332e-05, |
|
"loss": 0.0628, |
|
"step": 896 |
|
}, |
|
{ |
|
"epoch": 21.357142857142858, |
|
"grad_norm": 0.27508506178855896, |
|
"learning_rate": 1.7285714285714284e-05, |
|
"loss": 0.0824, |
|
"step": 897 |
|
}, |
|
{ |
|
"epoch": 21.38095238095238, |
|
"grad_norm": 0.2053472250699997, |
|
"learning_rate": 1.723809523809524e-05, |
|
"loss": 0.0791, |
|
"step": 898 |
|
}, |
|
{ |
|
"epoch": 21.404761904761905, |
|
"grad_norm": 0.28713539242744446, |
|
"learning_rate": 1.719047619047619e-05, |
|
"loss": 0.1038, |
|
"step": 899 |
|
}, |
|
{ |
|
"epoch": 21.428571428571427, |
|
"grad_norm": 0.20974591374397278, |
|
"learning_rate": 1.7142857142857142e-05, |
|
"loss": 0.0816, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 21.428571428571427, |
|
"eval_dice_score": 0.92420542823889, |
|
"eval_loss": 0.0804237425327301, |
|
"eval_runtime": 9.1587, |
|
"eval_samples_per_second": 9.718, |
|
"eval_steps_per_second": 0.983, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 21.452380952380953, |
|
"grad_norm": 0.18460780382156372, |
|
"learning_rate": 1.7095238095238093e-05, |
|
"loss": 0.085, |
|
"step": 901 |
|
}, |
|
{ |
|
"epoch": 21.476190476190474, |
|
"grad_norm": 0.4044642150402069, |
|
"learning_rate": 1.704761904761905e-05, |
|
"loss": 0.085, |
|
"step": 902 |
|
}, |
|
{ |
|
"epoch": 21.5, |
|
"grad_norm": 0.32045164704322815, |
|
"learning_rate": 1.7e-05, |
|
"loss": 0.0902, |
|
"step": 903 |
|
}, |
|
{ |
|
"epoch": 21.523809523809526, |
|
"grad_norm": 0.19330991804599762, |
|
"learning_rate": 1.695238095238095e-05, |
|
"loss": 0.0726, |
|
"step": 904 |
|
}, |
|
{ |
|
"epoch": 21.547619047619047, |
|
"grad_norm": 0.2264699935913086, |
|
"learning_rate": 1.6904761904761906e-05, |
|
"loss": 0.0879, |
|
"step": 905 |
|
}, |
|
{ |
|
"epoch": 21.571428571428573, |
|
"grad_norm": 0.25664040446281433, |
|
"learning_rate": 1.6857142857142858e-05, |
|
"loss": 0.083, |
|
"step": 906 |
|
}, |
|
{ |
|
"epoch": 21.595238095238095, |
|
"grad_norm": 0.28455498814582825, |
|
"learning_rate": 1.680952380952381e-05, |
|
"loss": 0.103, |
|
"step": 907 |
|
}, |
|
{ |
|
"epoch": 21.61904761904762, |
|
"grad_norm": 0.22873304784297943, |
|
"learning_rate": 1.6761904761904764e-05, |
|
"loss": 0.0927, |
|
"step": 908 |
|
}, |
|
{ |
|
"epoch": 21.642857142857142, |
|
"grad_norm": 0.20388808846473694, |
|
"learning_rate": 1.6714285714285716e-05, |
|
"loss": 0.0739, |
|
"step": 909 |
|
}, |
|
{ |
|
"epoch": 21.666666666666668, |
|
"grad_norm": 0.23918978869915009, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"loss": 0.0917, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 21.69047619047619, |
|
"grad_norm": 0.22957748174667358, |
|
"learning_rate": 1.661904761904762e-05, |
|
"loss": 0.0695, |
|
"step": 911 |
|
}, |
|
{ |
|
"epoch": 21.714285714285715, |
|
"grad_norm": 0.26606836915016174, |
|
"learning_rate": 1.6571428571428574e-05, |
|
"loss": 0.0953, |
|
"step": 912 |
|
}, |
|
{ |
|
"epoch": 21.738095238095237, |
|
"grad_norm": 0.29274919629096985, |
|
"learning_rate": 1.6523809523809525e-05, |
|
"loss": 0.0593, |
|
"step": 913 |
|
}, |
|
{ |
|
"epoch": 21.761904761904763, |
|
"grad_norm": 0.22514043748378754, |
|
"learning_rate": 1.6476190476190477e-05, |
|
"loss": 0.075, |
|
"step": 914 |
|
}, |
|
{ |
|
"epoch": 21.785714285714285, |
|
"grad_norm": 0.18376828730106354, |
|
"learning_rate": 1.6428571428571432e-05, |
|
"loss": 0.0791, |
|
"step": 915 |
|
}, |
|
{ |
|
"epoch": 21.80952380952381, |
|
"grad_norm": 0.4766561985015869, |
|
"learning_rate": 1.638095238095238e-05, |
|
"loss": 0.0971, |
|
"step": 916 |
|
}, |
|
{ |
|
"epoch": 21.833333333333332, |
|
"grad_norm": 0.2249252051115036, |
|
"learning_rate": 1.633333333333333e-05, |
|
"loss": 0.0792, |
|
"step": 917 |
|
}, |
|
{ |
|
"epoch": 21.857142857142858, |
|
"grad_norm": 0.3545120656490326, |
|
"learning_rate": 1.6285714285714283e-05, |
|
"loss": 0.1018, |
|
"step": 918 |
|
}, |
|
{ |
|
"epoch": 21.88095238095238, |
|
"grad_norm": 0.22790645062923431, |
|
"learning_rate": 1.6238095238095238e-05, |
|
"loss": 0.0781, |
|
"step": 919 |
|
}, |
|
{ |
|
"epoch": 21.904761904761905, |
|
"grad_norm": 0.1942400336265564, |
|
"learning_rate": 1.619047619047619e-05, |
|
"loss": 0.0828, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 21.904761904761905, |
|
"eval_dice_score": 0.9240529374274217, |
|
"eval_loss": 0.08054715394973755, |
|
"eval_runtime": 9.145, |
|
"eval_samples_per_second": 9.732, |
|
"eval_steps_per_second": 0.984, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 21.928571428571427, |
|
"grad_norm": 0.2710881531238556, |
|
"learning_rate": 1.614285714285714e-05, |
|
"loss": 0.0849, |
|
"step": 921 |
|
}, |
|
{ |
|
"epoch": 21.952380952380953, |
|
"grad_norm": 0.20673465728759766, |
|
"learning_rate": 1.6095238095238096e-05, |
|
"loss": 0.0931, |
|
"step": 922 |
|
}, |
|
{ |
|
"epoch": 21.976190476190474, |
|
"grad_norm": 0.37557193636894226, |
|
"learning_rate": 1.6047619047619048e-05, |
|
"loss": 0.0921, |
|
"step": 923 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"grad_norm": 0.29974475502967834, |
|
"learning_rate": 1.6e-05, |
|
"loss": 0.0946, |
|
"step": 924 |
|
}, |
|
{ |
|
"epoch": 22.023809523809526, |
|
"grad_norm": 0.2729737162590027, |
|
"learning_rate": 1.5952380952380954e-05, |
|
"loss": 0.0637, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 22.047619047619047, |
|
"grad_norm": 0.26678407192230225, |
|
"learning_rate": 1.5904761904761906e-05, |
|
"loss": 0.0829, |
|
"step": 926 |
|
}, |
|
{ |
|
"epoch": 22.071428571428573, |
|
"grad_norm": 0.2640640139579773, |
|
"learning_rate": 1.5857142857142857e-05, |
|
"loss": 0.0854, |
|
"step": 927 |
|
}, |
|
{ |
|
"epoch": 22.095238095238095, |
|
"grad_norm": 0.27443549036979675, |
|
"learning_rate": 1.580952380952381e-05, |
|
"loss": 0.0846, |
|
"step": 928 |
|
}, |
|
{ |
|
"epoch": 22.11904761904762, |
|
"grad_norm": 0.24837443232536316, |
|
"learning_rate": 1.5761904761904764e-05, |
|
"loss": 0.0705, |
|
"step": 929 |
|
}, |
|
{ |
|
"epoch": 22.142857142857142, |
|
"grad_norm": 0.17277519404888153, |
|
"learning_rate": 1.5714285714285715e-05, |
|
"loss": 0.0791, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 22.166666666666668, |
|
"grad_norm": 0.17575153708457947, |
|
"learning_rate": 1.5666666666666667e-05, |
|
"loss": 0.0742, |
|
"step": 931 |
|
}, |
|
{ |
|
"epoch": 22.19047619047619, |
|
"grad_norm": 0.20902031660079956, |
|
"learning_rate": 1.5619047619047622e-05, |
|
"loss": 0.0801, |
|
"step": 932 |
|
}, |
|
{ |
|
"epoch": 22.214285714285715, |
|
"grad_norm": 0.2587524652481079, |
|
"learning_rate": 1.5571428571428573e-05, |
|
"loss": 0.0998, |
|
"step": 933 |
|
}, |
|
{ |
|
"epoch": 22.238095238095237, |
|
"grad_norm": 0.24326211214065552, |
|
"learning_rate": 1.5523809523809525e-05, |
|
"loss": 0.09, |
|
"step": 934 |
|
}, |
|
{ |
|
"epoch": 22.261904761904763, |
|
"grad_norm": 0.15226134657859802, |
|
"learning_rate": 1.547619047619048e-05, |
|
"loss": 0.0771, |
|
"step": 935 |
|
}, |
|
{ |
|
"epoch": 22.285714285714285, |
|
"grad_norm": 0.2282165288925171, |
|
"learning_rate": 1.5428571428571428e-05, |
|
"loss": 0.1008, |
|
"step": 936 |
|
}, |
|
{ |
|
"epoch": 22.30952380952381, |
|
"grad_norm": 0.6688697934150696, |
|
"learning_rate": 1.538095238095238e-05, |
|
"loss": 0.1117, |
|
"step": 937 |
|
}, |
|
{ |
|
"epoch": 22.333333333333332, |
|
"grad_norm": 0.16388161480426788, |
|
"learning_rate": 1.533333333333333e-05, |
|
"loss": 0.0918, |
|
"step": 938 |
|
}, |
|
{ |
|
"epoch": 22.357142857142858, |
|
"grad_norm": 0.24473772943019867, |
|
"learning_rate": 1.5285714285714286e-05, |
|
"loss": 0.0811, |
|
"step": 939 |
|
}, |
|
{ |
|
"epoch": 22.38095238095238, |
|
"grad_norm": 0.29383912682533264, |
|
"learning_rate": 1.5238095238095238e-05, |
|
"loss": 0.0964, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 22.38095238095238, |
|
"eval_dice_score": 0.9245331000875532, |
|
"eval_loss": 0.08000662177801132, |
|
"eval_runtime": 8.8153, |
|
"eval_samples_per_second": 10.096, |
|
"eval_steps_per_second": 1.021, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 22.404761904761905, |
|
"grad_norm": 0.28534939885139465, |
|
"learning_rate": 1.519047619047619e-05, |
|
"loss": 0.0847, |
|
"step": 941 |
|
}, |
|
{ |
|
"epoch": 22.428571428571427, |
|
"grad_norm": 0.18751578032970428, |
|
"learning_rate": 1.5142857142857142e-05, |
|
"loss": 0.0729, |
|
"step": 942 |
|
}, |
|
{ |
|
"epoch": 22.452380952380953, |
|
"grad_norm": 0.14760446548461914, |
|
"learning_rate": 1.5095238095238096e-05, |
|
"loss": 0.0786, |
|
"step": 943 |
|
}, |
|
{ |
|
"epoch": 22.476190476190474, |
|
"grad_norm": 0.40601739287376404, |
|
"learning_rate": 1.5047619047619047e-05, |
|
"loss": 0.0667, |
|
"step": 944 |
|
}, |
|
{ |
|
"epoch": 22.5, |
|
"grad_norm": 0.28885194659233093, |
|
"learning_rate": 1.5e-05, |
|
"loss": 0.0886, |
|
"step": 945 |
|
}, |
|
{ |
|
"epoch": 22.523809523809526, |
|
"grad_norm": 0.2508743703365326, |
|
"learning_rate": 1.4952380952380954e-05, |
|
"loss": 0.0827, |
|
"step": 946 |
|
}, |
|
{ |
|
"epoch": 22.547619047619047, |
|
"grad_norm": 0.2167244702577591, |
|
"learning_rate": 1.4904761904761905e-05, |
|
"loss": 0.0742, |
|
"step": 947 |
|
}, |
|
{ |
|
"epoch": 22.571428571428573, |
|
"grad_norm": 0.22318218648433685, |
|
"learning_rate": 1.4857142857142858e-05, |
|
"loss": 0.0786, |
|
"step": 948 |
|
}, |
|
{ |
|
"epoch": 22.595238095238095, |
|
"grad_norm": 0.27486732602119446, |
|
"learning_rate": 1.480952380952381e-05, |
|
"loss": 0.0884, |
|
"step": 949 |
|
}, |
|
{ |
|
"epoch": 22.61904761904762, |
|
"grad_norm": 0.2715624272823334, |
|
"learning_rate": 1.4761904761904761e-05, |
|
"loss": 0.0893, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 22.642857142857142, |
|
"grad_norm": 0.18250462412834167, |
|
"learning_rate": 1.4714285714285715e-05, |
|
"loss": 0.0962, |
|
"step": 951 |
|
}, |
|
{ |
|
"epoch": 22.666666666666668, |
|
"grad_norm": 0.42171528935432434, |
|
"learning_rate": 1.4666666666666666e-05, |
|
"loss": 0.0766, |
|
"step": 952 |
|
}, |
|
{ |
|
"epoch": 22.69047619047619, |
|
"grad_norm": 0.15866506099700928, |
|
"learning_rate": 1.461904761904762e-05, |
|
"loss": 0.0734, |
|
"step": 953 |
|
}, |
|
{ |
|
"epoch": 22.714285714285715, |
|
"grad_norm": 0.19888657331466675, |
|
"learning_rate": 1.4571428571428571e-05, |
|
"loss": 0.0944, |
|
"step": 954 |
|
}, |
|
{ |
|
"epoch": 22.738095238095237, |
|
"grad_norm": 0.20909306406974792, |
|
"learning_rate": 1.4523809523809524e-05, |
|
"loss": 0.0964, |
|
"step": 955 |
|
}, |
|
{ |
|
"epoch": 22.761904761904763, |
|
"grad_norm": 0.28159016370773315, |
|
"learning_rate": 1.4476190476190478e-05, |
|
"loss": 0.0901, |
|
"step": 956 |
|
}, |
|
{ |
|
"epoch": 22.785714285714285, |
|
"grad_norm": 0.2649635672569275, |
|
"learning_rate": 1.4428571428571429e-05, |
|
"loss": 0.0655, |
|
"step": 957 |
|
}, |
|
{ |
|
"epoch": 22.80952380952381, |
|
"grad_norm": 0.22923074662685394, |
|
"learning_rate": 1.4380952380952382e-05, |
|
"loss": 0.1028, |
|
"step": 958 |
|
}, |
|
{ |
|
"epoch": 22.833333333333332, |
|
"grad_norm": 0.225973978638649, |
|
"learning_rate": 1.4333333333333334e-05, |
|
"loss": 0.0863, |
|
"step": 959 |
|
}, |
|
{ |
|
"epoch": 22.857142857142858, |
|
"grad_norm": 0.2754770815372467, |
|
"learning_rate": 1.4285714285714285e-05, |
|
"loss": 0.0893, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 22.857142857142858, |
|
"eval_dice_score": 0.925071224925649, |
|
"eval_loss": 0.08001391589641571, |
|
"eval_runtime": 8.8179, |
|
"eval_samples_per_second": 10.093, |
|
"eval_steps_per_second": 1.021, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 22.88095238095238, |
|
"grad_norm": 0.18644385039806366, |
|
"learning_rate": 1.4238095238095239e-05, |
|
"loss": 0.0813, |
|
"step": 961 |
|
}, |
|
{ |
|
"epoch": 22.904761904761905, |
|
"grad_norm": 0.18705585598945618, |
|
"learning_rate": 1.419047619047619e-05, |
|
"loss": 0.0642, |
|
"step": 962 |
|
}, |
|
{ |
|
"epoch": 22.928571428571427, |
|
"grad_norm": 0.2833781838417053, |
|
"learning_rate": 1.4142857142857143e-05, |
|
"loss": 0.0772, |
|
"step": 963 |
|
}, |
|
{ |
|
"epoch": 22.952380952380953, |
|
"grad_norm": 0.20696187019348145, |
|
"learning_rate": 1.4095238095238095e-05, |
|
"loss": 0.0883, |
|
"step": 964 |
|
}, |
|
{ |
|
"epoch": 22.976190476190474, |
|
"grad_norm": 0.24278227984905243, |
|
"learning_rate": 1.4047619047619048e-05, |
|
"loss": 0.0852, |
|
"step": 965 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"grad_norm": 0.29513198137283325, |
|
"learning_rate": 1.4e-05, |
|
"loss": 0.0855, |
|
"step": 966 |
|
}, |
|
{ |
|
"epoch": 23.023809523809526, |
|
"grad_norm": 0.18746696412563324, |
|
"learning_rate": 1.3952380952380953e-05, |
|
"loss": 0.0707, |
|
"step": 967 |
|
}, |
|
{ |
|
"epoch": 23.047619047619047, |
|
"grad_norm": 0.21581074595451355, |
|
"learning_rate": 1.3904761904761906e-05, |
|
"loss": 0.0887, |
|
"step": 968 |
|
}, |
|
{ |
|
"epoch": 23.071428571428573, |
|
"grad_norm": 0.33836206793785095, |
|
"learning_rate": 1.3857142857142858e-05, |
|
"loss": 0.118, |
|
"step": 969 |
|
}, |
|
{ |
|
"epoch": 23.095238095238095, |
|
"grad_norm": 0.2581540048122406, |
|
"learning_rate": 1.380952380952381e-05, |
|
"loss": 0.0985, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 23.11904761904762, |
|
"grad_norm": 0.1527651846408844, |
|
"learning_rate": 1.3761904761904761e-05, |
|
"loss": 0.0695, |
|
"step": 971 |
|
}, |
|
{ |
|
"epoch": 23.142857142857142, |
|
"grad_norm": 0.17830391228199005, |
|
"learning_rate": 1.3714285714285714e-05, |
|
"loss": 0.1029, |
|
"step": 972 |
|
}, |
|
{ |
|
"epoch": 23.166666666666668, |
|
"grad_norm": 0.4796157479286194, |
|
"learning_rate": 1.3666666666666667e-05, |
|
"loss": 0.093, |
|
"step": 973 |
|
}, |
|
{ |
|
"epoch": 23.19047619047619, |
|
"grad_norm": 0.21920832991600037, |
|
"learning_rate": 1.3619047619047619e-05, |
|
"loss": 0.0717, |
|
"step": 974 |
|
}, |
|
{ |
|
"epoch": 23.214285714285715, |
|
"grad_norm": 0.456992506980896, |
|
"learning_rate": 1.3571428571428572e-05, |
|
"loss": 0.0958, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 23.238095238095237, |
|
"grad_norm": 0.44602781534194946, |
|
"learning_rate": 1.3523809523809524e-05, |
|
"loss": 0.1064, |
|
"step": 976 |
|
}, |
|
{ |
|
"epoch": 23.261904761904763, |
|
"grad_norm": 0.19102871417999268, |
|
"learning_rate": 1.3476190476190477e-05, |
|
"loss": 0.0885, |
|
"step": 977 |
|
}, |
|
{ |
|
"epoch": 23.285714285714285, |
|
"grad_norm": 0.22834639251232147, |
|
"learning_rate": 1.342857142857143e-05, |
|
"loss": 0.0987, |
|
"step": 978 |
|
}, |
|
{ |
|
"epoch": 23.30952380952381, |
|
"grad_norm": 0.160812109708786, |
|
"learning_rate": 1.3380952380952382e-05, |
|
"loss": 0.0712, |
|
"step": 979 |
|
}, |
|
{ |
|
"epoch": 23.333333333333332, |
|
"grad_norm": 0.25260743498802185, |
|
"learning_rate": 1.3333333333333333e-05, |
|
"loss": 0.0709, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 23.333333333333332, |
|
"eval_dice_score": 0.9249947230101319, |
|
"eval_loss": 0.07975644618272781, |
|
"eval_runtime": 9.1651, |
|
"eval_samples_per_second": 9.711, |
|
"eval_steps_per_second": 0.982, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 23.357142857142858, |
|
"grad_norm": 0.32486334443092346, |
|
"learning_rate": 1.3285714285714285e-05, |
|
"loss": 0.0828, |
|
"step": 981 |
|
}, |
|
{ |
|
"epoch": 23.38095238095238, |
|
"grad_norm": 0.27001824975013733, |
|
"learning_rate": 1.3238095238095238e-05, |
|
"loss": 0.0746, |
|
"step": 982 |
|
}, |
|
{ |
|
"epoch": 23.404761904761905, |
|
"grad_norm": 0.20627065002918243, |
|
"learning_rate": 1.3190476190476191e-05, |
|
"loss": 0.0768, |
|
"step": 983 |
|
}, |
|
{ |
|
"epoch": 23.428571428571427, |
|
"grad_norm": 0.29352235794067383, |
|
"learning_rate": 1.3142857142857143e-05, |
|
"loss": 0.0788, |
|
"step": 984 |
|
}, |
|
{ |
|
"epoch": 23.452380952380953, |
|
"grad_norm": 0.23943500220775604, |
|
"learning_rate": 1.3095238095238096e-05, |
|
"loss": 0.07, |
|
"step": 985 |
|
}, |
|
{ |
|
"epoch": 23.476190476190474, |
|
"grad_norm": 0.22379229962825775, |
|
"learning_rate": 1.3047619047619048e-05, |
|
"loss": 0.0785, |
|
"step": 986 |
|
}, |
|
{ |
|
"epoch": 23.5, |
|
"grad_norm": 0.3175750970840454, |
|
"learning_rate": 1.3000000000000001e-05, |
|
"loss": 0.0999, |
|
"step": 987 |
|
}, |
|
{ |
|
"epoch": 23.523809523809526, |
|
"grad_norm": 0.24017292261123657, |
|
"learning_rate": 1.2952380952380954e-05, |
|
"loss": 0.0867, |
|
"step": 988 |
|
}, |
|
{ |
|
"epoch": 23.547619047619047, |
|
"grad_norm": 0.33831965923309326, |
|
"learning_rate": 1.2904761904761906e-05, |
|
"loss": 0.0982, |
|
"step": 989 |
|
}, |
|
{ |
|
"epoch": 23.571428571428573, |
|
"grad_norm": 0.20093227922916412, |
|
"learning_rate": 1.2857142857142857e-05, |
|
"loss": 0.074, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 23.595238095238095, |
|
"grad_norm": 0.22127002477645874, |
|
"learning_rate": 1.2809523809523809e-05, |
|
"loss": 0.0726, |
|
"step": 991 |
|
}, |
|
{ |
|
"epoch": 23.61904761904762, |
|
"grad_norm": 0.3663376271724701, |
|
"learning_rate": 1.2761904761904762e-05, |
|
"loss": 0.0784, |
|
"step": 992 |
|
}, |
|
{ |
|
"epoch": 23.642857142857142, |
|
"grad_norm": 0.5978325009346008, |
|
"learning_rate": 1.2714285714285714e-05, |
|
"loss": 0.0979, |
|
"step": 993 |
|
}, |
|
{ |
|
"epoch": 23.666666666666668, |
|
"grad_norm": 0.24766002595424652, |
|
"learning_rate": 1.2666666666666667e-05, |
|
"loss": 0.0875, |
|
"step": 994 |
|
}, |
|
{ |
|
"epoch": 23.69047619047619, |
|
"grad_norm": 0.34553080797195435, |
|
"learning_rate": 1.261904761904762e-05, |
|
"loss": 0.0939, |
|
"step": 995 |
|
}, |
|
{ |
|
"epoch": 23.714285714285715, |
|
"grad_norm": 0.26458320021629333, |
|
"learning_rate": 1.2571428571428572e-05, |
|
"loss": 0.0937, |
|
"step": 996 |
|
}, |
|
{ |
|
"epoch": 23.738095238095237, |
|
"grad_norm": 0.24503840506076813, |
|
"learning_rate": 1.2523809523809525e-05, |
|
"loss": 0.0836, |
|
"step": 997 |
|
}, |
|
{ |
|
"epoch": 23.761904761904763, |
|
"grad_norm": 0.2026822566986084, |
|
"learning_rate": 1.2476190476190476e-05, |
|
"loss": 0.0808, |
|
"step": 998 |
|
}, |
|
{ |
|
"epoch": 23.785714285714285, |
|
"grad_norm": 0.19072376191616058, |
|
"learning_rate": 1.242857142857143e-05, |
|
"loss": 0.0735, |
|
"step": 999 |
|
}, |
|
{ |
|
"epoch": 23.80952380952381, |
|
"grad_norm": 0.39544475078582764, |
|
"learning_rate": 1.2380952380952381e-05, |
|
"loss": 0.0771, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 23.80952380952381, |
|
"eval_dice_score": 0.9249777722999508, |
|
"eval_loss": 0.07983684539794922, |
|
"eval_runtime": 9.0081, |
|
"eval_samples_per_second": 9.88, |
|
"eval_steps_per_second": 0.999, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 23.833333333333332, |
|
"grad_norm": 0.2170388102531433, |
|
"learning_rate": 1.2333333333333333e-05, |
|
"loss": 0.0795, |
|
"step": 1001 |
|
}, |
|
{ |
|
"epoch": 23.857142857142858, |
|
"grad_norm": 0.12621523439884186, |
|
"learning_rate": 1.2285714285714286e-05, |
|
"loss": 0.0775, |
|
"step": 1002 |
|
}, |
|
{ |
|
"epoch": 23.88095238095238, |
|
"grad_norm": 0.21630653738975525, |
|
"learning_rate": 1.2238095238095237e-05, |
|
"loss": 0.0609, |
|
"step": 1003 |
|
}, |
|
{ |
|
"epoch": 23.904761904761905, |
|
"grad_norm": 0.17754028737545013, |
|
"learning_rate": 1.219047619047619e-05, |
|
"loss": 0.0806, |
|
"step": 1004 |
|
}, |
|
{ |
|
"epoch": 23.928571428571427, |
|
"grad_norm": 0.24530617892742157, |
|
"learning_rate": 1.2142857142857144e-05, |
|
"loss": 0.0696, |
|
"step": 1005 |
|
}, |
|
{ |
|
"epoch": 23.952380952380953, |
|
"grad_norm": 0.4387490749359131, |
|
"learning_rate": 1.2095238095238096e-05, |
|
"loss": 0.0929, |
|
"step": 1006 |
|
}, |
|
{ |
|
"epoch": 23.976190476190474, |
|
"grad_norm": 0.19713687896728516, |
|
"learning_rate": 1.2047619047619049e-05, |
|
"loss": 0.073, |
|
"step": 1007 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"grad_norm": 0.3298085927963257, |
|
"learning_rate": 1.2e-05, |
|
"loss": 0.0952, |
|
"step": 1008 |
|
}, |
|
{ |
|
"epoch": 24.023809523809526, |
|
"grad_norm": 0.13209277391433716, |
|
"learning_rate": 1.1952380952380952e-05, |
|
"loss": 0.0674, |
|
"step": 1009 |
|
}, |
|
{ |
|
"epoch": 24.047619047619047, |
|
"grad_norm": 0.25918281078338623, |
|
"learning_rate": 1.1904761904761905e-05, |
|
"loss": 0.0972, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 24.071428571428573, |
|
"grad_norm": 0.18305575847625732, |
|
"learning_rate": 1.1857142857142857e-05, |
|
"loss": 0.0749, |
|
"step": 1011 |
|
}, |
|
{ |
|
"epoch": 24.095238095238095, |
|
"grad_norm": 0.22177988290786743, |
|
"learning_rate": 1.180952380952381e-05, |
|
"loss": 0.0948, |
|
"step": 1012 |
|
}, |
|
{ |
|
"epoch": 24.11904761904762, |
|
"grad_norm": 0.9742544293403625, |
|
"learning_rate": 1.1761904761904761e-05, |
|
"loss": 0.0748, |
|
"step": 1013 |
|
}, |
|
{ |
|
"epoch": 24.142857142857142, |
|
"grad_norm": 0.2539862096309662, |
|
"learning_rate": 1.1714285714285715e-05, |
|
"loss": 0.0737, |
|
"step": 1014 |
|
}, |
|
{ |
|
"epoch": 24.166666666666668, |
|
"grad_norm": 0.2597903311252594, |
|
"learning_rate": 1.1666666666666668e-05, |
|
"loss": 0.0694, |
|
"step": 1015 |
|
}, |
|
{ |
|
"epoch": 24.19047619047619, |
|
"grad_norm": 0.2900591492652893, |
|
"learning_rate": 1.161904761904762e-05, |
|
"loss": 0.0904, |
|
"step": 1016 |
|
}, |
|
{ |
|
"epoch": 24.214285714285715, |
|
"grad_norm": 0.30920645594596863, |
|
"learning_rate": 1.1571428571428573e-05, |
|
"loss": 0.0873, |
|
"step": 1017 |
|
}, |
|
{ |
|
"epoch": 24.238095238095237, |
|
"grad_norm": 0.2939493954181671, |
|
"learning_rate": 1.1523809523809524e-05, |
|
"loss": 0.0803, |
|
"step": 1018 |
|
}, |
|
{ |
|
"epoch": 24.261904761904763, |
|
"grad_norm": 0.323430597782135, |
|
"learning_rate": 1.1476190476190476e-05, |
|
"loss": 0.0912, |
|
"step": 1019 |
|
}, |
|
{ |
|
"epoch": 24.285714285714285, |
|
"grad_norm": 0.17873886227607727, |
|
"learning_rate": 1.1428571428571429e-05, |
|
"loss": 0.0781, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 24.285714285714285, |
|
"eval_dice_score": 0.9251687151008064, |
|
"eval_loss": 0.07953391969203949, |
|
"eval_runtime": 8.8432, |
|
"eval_samples_per_second": 10.064, |
|
"eval_steps_per_second": 1.018, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 24.30952380952381, |
|
"grad_norm": 0.2174515575170517, |
|
"learning_rate": 1.138095238095238e-05, |
|
"loss": 0.0689, |
|
"step": 1021 |
|
}, |
|
{ |
|
"epoch": 24.333333333333332, |
|
"grad_norm": 0.25644710659980774, |
|
"learning_rate": 1.1333333333333334e-05, |
|
"loss": 0.0696, |
|
"step": 1022 |
|
}, |
|
{ |
|
"epoch": 24.357142857142858, |
|
"grad_norm": 0.3882044851779938, |
|
"learning_rate": 1.1285714285714285e-05, |
|
"loss": 0.0888, |
|
"step": 1023 |
|
}, |
|
{ |
|
"epoch": 24.38095238095238, |
|
"grad_norm": 0.23190882802009583, |
|
"learning_rate": 1.1238095238095239e-05, |
|
"loss": 0.0754, |
|
"step": 1024 |
|
}, |
|
{ |
|
"epoch": 24.404761904761905, |
|
"grad_norm": 0.17043378949165344, |
|
"learning_rate": 1.119047619047619e-05, |
|
"loss": 0.0791, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 24.428571428571427, |
|
"grad_norm": 0.33094340562820435, |
|
"learning_rate": 1.1142857142857143e-05, |
|
"loss": 0.0838, |
|
"step": 1026 |
|
}, |
|
{ |
|
"epoch": 24.452380952380953, |
|
"grad_norm": 0.22381150722503662, |
|
"learning_rate": 1.1095238095238097e-05, |
|
"loss": 0.1096, |
|
"step": 1027 |
|
}, |
|
{ |
|
"epoch": 24.476190476190474, |
|
"grad_norm": 0.18582071363925934, |
|
"learning_rate": 1.1047619047619048e-05, |
|
"loss": 0.0733, |
|
"step": 1028 |
|
}, |
|
{ |
|
"epoch": 24.5, |
|
"grad_norm": 0.16228701174259186, |
|
"learning_rate": 1.1e-05, |
|
"loss": 0.0776, |
|
"step": 1029 |
|
}, |
|
{ |
|
"epoch": 24.523809523809526, |
|
"grad_norm": 0.4295779764652252, |
|
"learning_rate": 1.0952380952380951e-05, |
|
"loss": 0.0953, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 24.547619047619047, |
|
"grad_norm": 0.2032005786895752, |
|
"learning_rate": 1.0904761904761905e-05, |
|
"loss": 0.0994, |
|
"step": 1031 |
|
}, |
|
{ |
|
"epoch": 24.571428571428573, |
|
"grad_norm": 0.4408031702041626, |
|
"learning_rate": 1.0857142857142858e-05, |
|
"loss": 0.0832, |
|
"step": 1032 |
|
}, |
|
{ |
|
"epoch": 24.595238095238095, |
|
"grad_norm": 0.2662723660469055, |
|
"learning_rate": 1.080952380952381e-05, |
|
"loss": 0.0718, |
|
"step": 1033 |
|
}, |
|
{ |
|
"epoch": 24.61904761904762, |
|
"grad_norm": 0.23538383841514587, |
|
"learning_rate": 1.0761904761904763e-05, |
|
"loss": 0.0834, |
|
"step": 1034 |
|
}, |
|
{ |
|
"epoch": 24.642857142857142, |
|
"grad_norm": 0.15239837765693665, |
|
"learning_rate": 1.0714285714285714e-05, |
|
"loss": 0.0809, |
|
"step": 1035 |
|
}, |
|
{ |
|
"epoch": 24.666666666666668, |
|
"grad_norm": 0.20802651345729828, |
|
"learning_rate": 1.0666666666666667e-05, |
|
"loss": 0.0951, |
|
"step": 1036 |
|
}, |
|
{ |
|
"epoch": 24.69047619047619, |
|
"grad_norm": 0.1992301344871521, |
|
"learning_rate": 1.061904761904762e-05, |
|
"loss": 0.0803, |
|
"step": 1037 |
|
}, |
|
{ |
|
"epoch": 24.714285714285715, |
|
"grad_norm": 0.24126474559307098, |
|
"learning_rate": 1.0571428571428572e-05, |
|
"loss": 0.0766, |
|
"step": 1038 |
|
}, |
|
{ |
|
"epoch": 24.738095238095237, |
|
"grad_norm": 0.2718803286552429, |
|
"learning_rate": 1.0523809523809524e-05, |
|
"loss": 0.0871, |
|
"step": 1039 |
|
}, |
|
{ |
|
"epoch": 24.761904761904763, |
|
"grad_norm": 0.27064719796180725, |
|
"learning_rate": 1.0476190476190475e-05, |
|
"loss": 0.0857, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 24.761904761904763, |
|
"eval_dice_score": 0.9250793835928479, |
|
"eval_loss": 0.07883252203464508, |
|
"eval_runtime": 8.8276, |
|
"eval_samples_per_second": 10.082, |
|
"eval_steps_per_second": 1.02, |
|
"step": 1040 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 1260, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 30, |
|
"save_steps": 20, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": false |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 1.8144939555894067e+17, |
|
"train_batch_size": 10, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|