|
{
  "best_metric": 0.9980530178223753,
  "best_model_checkpoint": "ai_detector_v2/checkpoint-1210",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 1210,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 2.747252747252747e-06,
      "loss": 0.7391,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 5.494505494505494e-06,
      "loss": 0.4175,
      "step": 20
    },
    {
      "epoch": 0.05,
      "learning_rate": 8.241758241758243e-06,
      "loss": 0.3476,
      "step": 30
    },
    {
      "epoch": 0.07,
      "learning_rate": 1.0989010989010989e-05,
      "loss": 0.1961,
      "step": 40
    },
    {
      "epoch": 0.08,
      "learning_rate": 1.3736263736263738e-05,
      "loss": 0.1436,
      "step": 50
    },
    {
      "epoch": 0.1,
      "learning_rate": 1.6483516483516486e-05,
      "loss": 0.0595,
      "step": 60
    },
    {
      "epoch": 0.12,
      "learning_rate": 1.923076923076923e-05,
      "loss": 0.0585,
      "step": 70
    },
    {
      "epoch": 0.13,
      "learning_rate": 2.1978021978021977e-05,
      "loss": 0.0607,
      "step": 80
    },
    {
      "epoch": 0.15,
      "learning_rate": 2.4725274725274727e-05,
      "loss": 0.041,
      "step": 90
    },
    {
      "epoch": 0.17,
      "learning_rate": 2.7472527472527476e-05,
      "loss": 0.0561,
      "step": 100
    },
    {
      "epoch": 0.18,
      "learning_rate": 3.021978021978022e-05,
      "loss": 0.0517,
      "step": 110
    },
    {
      "epoch": 0.2,
      "learning_rate": 3.296703296703297e-05,
      "loss": 0.0581,
      "step": 120
    },
    {
      "epoch": 0.21,
      "learning_rate": 3.571428571428572e-05,
      "loss": 0.0281,
      "step": 130
    },
    {
      "epoch": 0.23,
      "learning_rate": 3.846153846153846e-05,
      "loss": 0.0612,
      "step": 140
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.120879120879121e-05,
      "loss": 0.0297,
      "step": 150
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.3956043956043955e-05,
      "loss": 0.021,
      "step": 160
    },
    {
      "epoch": 0.28,
      "learning_rate": 4.670329670329671e-05,
      "loss": 0.019,
      "step": 170
    },
    {
      "epoch": 0.3,
      "learning_rate": 4.945054945054945e-05,
      "loss": 0.0233,
      "step": 180
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.9755052051439074e-05,
      "loss": 0.0298,
      "step": 190
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.944886711573791e-05,
      "loss": 0.0421,
      "step": 200
    },
    {
      "epoch": 0.35,
      "learning_rate": 4.914268218003674e-05,
      "loss": 0.0277,
      "step": 210
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.883649724433558e-05,
      "loss": 0.0361,
      "step": 220
    },
    {
      "epoch": 0.38,
      "learning_rate": 4.8530312308634416e-05,
      "loss": 0.0579,
      "step": 230
    },
    {
      "epoch": 0.4,
      "learning_rate": 4.822412737293325e-05,
      "loss": 0.0338,
      "step": 240
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.791794243723209e-05,
      "loss": 0.0366,
      "step": 250
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.761175750153093e-05,
      "loss": 0.0092,
      "step": 260
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.7305572565829766e-05,
      "loss": 0.063,
      "step": 270
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.69993876301286e-05,
      "loss": 0.0527,
      "step": 280
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.6693202694427433e-05,
      "loss": 0.023,
      "step": 290
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.6387017758726274e-05,
      "loss": 0.0437,
      "step": 300
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.608083282302511e-05,
      "loss": 0.0381,
      "step": 310
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.577464788732395e-05,
      "loss": 0.0288,
      "step": 320
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.546846295162278e-05,
      "loss": 0.0385,
      "step": 330
    },
    {
      "epoch": 0.56,
      "learning_rate": 4.516227801592162e-05,
      "loss": 0.0442,
      "step": 340
    },
    {
      "epoch": 0.58,
      "learning_rate": 4.485609308022046e-05,
      "loss": 0.0128,
      "step": 350
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.454990814451929e-05,
      "loss": 0.0189,
      "step": 360
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.4243723208818125e-05,
      "loss": 0.0219,
      "step": 370
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.3937538273116966e-05,
      "loss": 0.0296,
      "step": 380
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.363135333741581e-05,
      "loss": 0.058,
      "step": 390
    },
    {
      "epoch": 0.66,
      "learning_rate": 4.332516840171464e-05,
      "loss": 0.0317,
      "step": 400
    },
    {
      "epoch": 0.68,
      "learning_rate": 4.3018983466013475e-05,
      "loss": 0.0364,
      "step": 410
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.271279853031231e-05,
      "loss": 0.0239,
      "step": 420
    },
    {
      "epoch": 0.71,
      "learning_rate": 4.240661359461115e-05,
      "loss": 0.0265,
      "step": 430
    },
    {
      "epoch": 0.73,
      "learning_rate": 4.210042865890998e-05,
      "loss": 0.0046,
      "step": 440
    },
    {
      "epoch": 0.74,
      "learning_rate": 4.179424372320882e-05,
      "loss": 0.0182,
      "step": 450
    },
    {
      "epoch": 0.76,
      "learning_rate": 4.148805878750766e-05,
      "loss": 0.0255,
      "step": 460
    },
    {
      "epoch": 0.78,
      "learning_rate": 4.118187385180649e-05,
      "loss": 0.0143,
      "step": 470
    },
    {
      "epoch": 0.79,
      "learning_rate": 4.087568891610533e-05,
      "loss": 0.0348,
      "step": 480
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.0569503980404167e-05,
      "loss": 0.035,
      "step": 490
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.0263319044703e-05,
      "loss": 0.0397,
      "step": 500
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.9957134109001834e-05,
      "loss": 0.0194,
      "step": 510
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.9650949173300675e-05,
      "loss": 0.0075,
      "step": 520
    },
    {
      "epoch": 0.88,
      "learning_rate": 3.9344764237599516e-05,
      "loss": 0.0025,
      "step": 530
    },
    {
      "epoch": 0.89,
      "learning_rate": 3.903857930189835e-05,
      "loss": 0.002,
      "step": 540
    },
    {
      "epoch": 0.91,
      "learning_rate": 3.8732394366197184e-05,
      "loss": 0.0093,
      "step": 550
    },
    {
      "epoch": 0.93,
      "learning_rate": 3.8426209430496025e-05,
      "loss": 0.0027,
      "step": 560
    },
    {
      "epoch": 0.94,
      "learning_rate": 3.812002449479486e-05,
      "loss": 0.011,
      "step": 570
    },
    {
      "epoch": 0.96,
      "learning_rate": 3.781383955909369e-05,
      "loss": 0.0133,
      "step": 580
    },
    {
      "epoch": 0.98,
      "learning_rate": 3.7507654623392526e-05,
      "loss": 0.0314,
      "step": 590
    },
    {
      "epoch": 0.99,
      "learning_rate": 3.720146968769137e-05,
      "loss": 0.0187,
      "step": 600
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9973790624531975,
      "eval_loss": 0.010242484509944916,
      "eval_runtime": 251.9013,
      "eval_samples_per_second": 53.013,
      "eval_steps_per_second": 3.315,
      "step": 605
    },
    {
      "epoch": 1.01,
      "learning_rate": 3.689528475199021e-05,
      "loss": 0.0335,
      "step": 610
    },
    {
      "epoch": 1.02,
      "learning_rate": 3.658909981628904e-05,
      "loss": 0.0224,
      "step": 620
    },
    {
      "epoch": 1.04,
      "learning_rate": 3.6282914880587876e-05,
      "loss": 0.0192,
      "step": 630
    },
    {
      "epoch": 1.06,
      "learning_rate": 3.597672994488671e-05,
      "loss": 0.0164,
      "step": 640
    },
    {
      "epoch": 1.07,
      "learning_rate": 3.567054500918555e-05,
      "loss": 0.0109,
      "step": 650
    },
    {
      "epoch": 1.09,
      "learning_rate": 3.5364360073484384e-05,
      "loss": 0.013,
      "step": 660
    },
    {
      "epoch": 1.11,
      "learning_rate": 3.5058175137783225e-05,
      "loss": 0.0055,
      "step": 670
    },
    {
      "epoch": 1.12,
      "learning_rate": 3.475199020208206e-05,
      "loss": 0.0191,
      "step": 680
    },
    {
      "epoch": 1.14,
      "learning_rate": 3.44458052663809e-05,
      "loss": 0.0013,
      "step": 690
    },
    {
      "epoch": 1.16,
      "learning_rate": 3.4139620330679734e-05,
      "loss": 0.0186,
      "step": 700
    },
    {
      "epoch": 1.17,
      "learning_rate": 3.383343539497857e-05,
      "loss": 0.0001,
      "step": 710
    },
    {
      "epoch": 1.19,
      "learning_rate": 3.35272504592774e-05,
      "loss": 0.0127,
      "step": 720
    },
    {
      "epoch": 1.21,
      "learning_rate": 3.322106552357624e-05,
      "loss": 0.0052,
      "step": 730
    },
    {
      "epoch": 1.22,
      "learning_rate": 3.291488058787508e-05,
      "loss": 0.0278,
      "step": 740
    },
    {
      "epoch": 1.24,
      "learning_rate": 3.260869565217392e-05,
      "loss": 0.0015,
      "step": 750
    },
    {
      "epoch": 1.26,
      "learning_rate": 3.230251071647275e-05,
      "loss": 0.0026,
      "step": 760
    },
    {
      "epoch": 1.27,
      "learning_rate": 3.1996325780771585e-05,
      "loss": 0.0243,
      "step": 770
    },
    {
      "epoch": 1.29,
      "learning_rate": 3.1690140845070426e-05,
      "loss": 0.0142,
      "step": 780
    },
    {
      "epoch": 1.31,
      "learning_rate": 3.138395590936926e-05,
      "loss": 0.007,
      "step": 790
    },
    {
      "epoch": 1.32,
      "learning_rate": 3.1077770973668093e-05,
      "loss": 0.0175,
      "step": 800
    },
    {
      "epoch": 1.34,
      "learning_rate": 3.0771586037966934e-05,
      "loss": 0.0064,
      "step": 810
    },
    {
      "epoch": 1.36,
      "learning_rate": 3.046540110226577e-05,
      "loss": 0.0168,
      "step": 820
    },
    {
      "epoch": 1.37,
      "learning_rate": 3.015921616656461e-05,
      "loss": 0.002,
      "step": 830
    },
    {
      "epoch": 1.39,
      "learning_rate": 2.9853031230863443e-05,
      "loss": 0.0129,
      "step": 840
    },
    {
      "epoch": 1.4,
      "learning_rate": 2.9546846295162277e-05,
      "loss": 0.0134,
      "step": 850
    },
    {
      "epoch": 1.42,
      "learning_rate": 2.9240661359461114e-05,
      "loss": 0.0194,
      "step": 860
    },
    {
      "epoch": 1.44,
      "learning_rate": 2.8934476423759955e-05,
      "loss": 0.0096,
      "step": 870
    },
    {
      "epoch": 1.45,
      "learning_rate": 2.862829148805879e-05,
      "loss": 0.0048,
      "step": 880
    },
    {
      "epoch": 1.47,
      "learning_rate": 2.8322106552357626e-05,
      "loss": 0.0003,
      "step": 890
    },
    {
      "epoch": 1.49,
      "learning_rate": 2.801592161665646e-05,
      "loss": 0.0115,
      "step": 900
    },
    {
      "epoch": 1.5,
      "learning_rate": 2.77097366809553e-05,
      "loss": 0.0264,
      "step": 910
    },
    {
      "epoch": 1.52,
      "learning_rate": 2.7403551745254135e-05,
      "loss": 0.0233,
      "step": 920
    },
    {
      "epoch": 1.54,
      "learning_rate": 2.7097366809552972e-05,
      "loss": 0.0008,
      "step": 930
    },
    {
      "epoch": 1.55,
      "learning_rate": 2.6791181873851806e-05,
      "loss": 0.0103,
      "step": 940
    },
    {
      "epoch": 1.57,
      "learning_rate": 2.6484996938150647e-05,
      "loss": 0.0356,
      "step": 950
    },
    {
      "epoch": 1.59,
      "learning_rate": 2.617881200244948e-05,
      "loss": 0.0115,
      "step": 960
    },
    {
      "epoch": 1.6,
      "learning_rate": 2.5872627066748318e-05,
      "loss": 0.0229,
      "step": 970
    },
    {
      "epoch": 1.62,
      "learning_rate": 2.5566442131047152e-05,
      "loss": 0.0163,
      "step": 980
    },
    {
      "epoch": 1.64,
      "learning_rate": 2.526025719534599e-05,
      "loss": 0.0052,
      "step": 990
    },
    {
      "epoch": 1.65,
      "learning_rate": 2.4954072259644827e-05,
      "loss": 0.0237,
      "step": 1000
    },
    {
      "epoch": 1.67,
      "learning_rate": 2.4647887323943664e-05,
      "loss": 0.0121,
      "step": 1010
    },
    {
      "epoch": 1.69,
      "learning_rate": 2.4341702388242498e-05,
      "loss": 0.0022,
      "step": 1020
    },
    {
      "epoch": 1.7,
      "learning_rate": 2.4035517452541335e-05,
      "loss": 0.0105,
      "step": 1030
    },
    {
      "epoch": 1.72,
      "learning_rate": 2.3729332516840173e-05,
      "loss": 0.0138,
      "step": 1040
    },
    {
      "epoch": 1.74,
      "learning_rate": 2.342314758113901e-05,
      "loss": 0.0019,
      "step": 1050
    },
    {
      "epoch": 1.75,
      "learning_rate": 2.3116962645437844e-05,
      "loss": 0.0075,
      "step": 1060
    },
    {
      "epoch": 1.77,
      "learning_rate": 2.2810777709736685e-05,
      "loss": 0.0103,
      "step": 1070
    },
    {
      "epoch": 1.79,
      "learning_rate": 2.250459277403552e-05,
      "loss": 0.0107,
      "step": 1080
    },
    {
      "epoch": 1.8,
      "learning_rate": 2.2198407838334356e-05,
      "loss": 0.0018,
      "step": 1090
    },
    {
      "epoch": 1.82,
      "learning_rate": 2.1892222902633193e-05,
      "loss": 0.0038,
      "step": 1100
    },
    {
      "epoch": 1.83,
      "learning_rate": 2.1586037966932027e-05,
      "loss": 0.0028,
      "step": 1110
    },
    {
      "epoch": 1.85,
      "learning_rate": 2.1279853031230864e-05,
      "loss": 0.0095,
      "step": 1120
    },
    {
      "epoch": 1.87,
      "learning_rate": 2.09736680955297e-05,
      "loss": 0.0106,
      "step": 1130
    },
    {
      "epoch": 1.88,
      "learning_rate": 2.066748315982854e-05,
      "loss": 0.0285,
      "step": 1140
    },
    {
      "epoch": 1.9,
      "learning_rate": 2.0361298224127373e-05,
      "loss": 0.017,
      "step": 1150
    },
    {
      "epoch": 1.92,
      "learning_rate": 2.005511328842621e-05,
      "loss": 0.0025,
      "step": 1160
    },
    {
      "epoch": 1.93,
      "learning_rate": 1.9748928352725048e-05,
      "loss": 0.0079,
      "step": 1170
    },
    {
      "epoch": 1.95,
      "learning_rate": 1.9442743417023885e-05,
      "loss": 0.0167,
      "step": 1180
    },
    {
      "epoch": 1.97,
      "learning_rate": 1.913655848132272e-05,
      "loss": 0.0202,
      "step": 1190
    },
    {
      "epoch": 1.98,
      "learning_rate": 1.8830373545621556e-05,
      "loss": 0.0053,
      "step": 1200
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.8524188609920394e-05,
      "loss": 0.0079,
      "step": 1210
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9980530178223753,
      "eval_loss": 0.00832486990839243,
      "eval_runtime": 254.4173,
      "eval_samples_per_second": 52.489,
      "eval_steps_per_second": 3.282,
      "step": 1210
    }
  ],
  "logging_steps": 10,
  "max_steps": 1815,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 6.066296372278518e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
|
|