{
"best_metric": 0.42871516942977905,
"best_model_checkpoint": "./convnext-tiny-upgrade-1k-224-batch-32/checkpoint-5500",
"epoch": 10.0,
"eval_steps": 500,
"global_step": 5500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.18,
"grad_norm": 9.459319114685059,
"learning_rate": 2.9975536558892034e-05,
"loss": 3.1737,
"step": 100
},
{
"epoch": 0.36,
"grad_norm": 15.453875541687012,
"learning_rate": 2.9902226030228252e-05,
"loss": 2.5523,
"step": 200
},
{
"epoch": 0.55,
"grad_norm": 14.928990364074707,
"learning_rate": 2.9780307537715396e-05,
"loss": 2.0438,
"step": 300
},
{
"epoch": 0.73,
"grad_norm": 14.474929809570312,
"learning_rate": 2.9610178754135005e-05,
"loss": 1.7769,
"step": 400
},
{
"epoch": 0.91,
"grad_norm": 16.29086685180664,
"learning_rate": 2.939239460421746e-05,
"loss": 1.5523,
"step": 500
},
{
"epoch": 1.0,
"eval_accuracy": 0.7009940357852883,
"eval_loss": 1.2082535028457642,
"eval_runtime": 49.3679,
"eval_samples_per_second": 50.944,
"eval_steps_per_second": 1.6,
"step": 550
},
{
"epoch": 1.09,
"grad_norm": 23.075525283813477,
"learning_rate": 2.9127665454592872e-05,
"loss": 1.448,
"step": 600
},
{
"epoch": 1.27,
"grad_norm": 22.86272430419922,
"learning_rate": 2.8816854796722754e-05,
"loss": 1.2812,
"step": 700
},
{
"epoch": 1.45,
"grad_norm": 18.48836898803711,
"learning_rate": 2.8460976430370375e-05,
"loss": 1.2282,
"step": 800
},
{
"epoch": 1.64,
"grad_norm": 32.918296813964844,
"learning_rate": 2.8061191156796658e-05,
"loss": 1.2056,
"step": 900
},
{
"epoch": 1.82,
"grad_norm": 19.546428680419922,
"learning_rate": 2.7618802992467718e-05,
"loss": 1.1179,
"step": 1000
},
{
"epoch": 2.0,
"grad_norm": 39.9352912902832,
"learning_rate": 2.7135254915624213e-05,
"loss": 1.0852,
"step": 1100
},
{
"epoch": 2.0,
"eval_accuracy": 0.7960238568588469,
"eval_loss": 0.7955119013786316,
"eval_runtime": 49.699,
"eval_samples_per_second": 50.605,
"eval_steps_per_second": 1.59,
"step": 1100
},
{
"epoch": 2.18,
"grad_norm": 31.628759384155273,
"learning_rate": 2.661212415958624e-05,
"loss": 1.0018,
"step": 1200
},
{
"epoch": 2.36,
"grad_norm": 20.639881134033203,
"learning_rate": 2.6051117068146073e-05,
"loss": 1.0004,
"step": 1300
},
{
"epoch": 2.55,
"grad_norm": 34.07547378540039,
"learning_rate": 2.5454063529829405e-05,
"loss": 0.9302,
"step": 1400
},
{
"epoch": 2.73,
"grad_norm": 21.45457649230957,
"learning_rate": 2.482291100917928e-05,
"loss": 0.9218,
"step": 1500
},
{
"epoch": 2.91,
"grad_norm": 16.903533935546875,
"learning_rate": 2.4159718194531573e-05,
"loss": 0.9179,
"step": 1600
},
{
"epoch": 3.0,
"eval_accuracy": 0.8258449304174951,
"eval_loss": 0.6425434947013855,
"eval_runtime": 50.6126,
"eval_samples_per_second": 49.691,
"eval_steps_per_second": 1.561,
"step": 1650
},
{
"epoch": 3.09,
"grad_norm": 20.296306610107422,
"learning_rate": 2.3466648283001542e-05,
"loss": 0.8552,
"step": 1700
},
{
"epoch": 3.27,
"grad_norm": 25.9653263092041,
"learning_rate": 2.274596192458443e-05,
"loss": 0.8283,
"step": 1800
},
{
"epoch": 3.45,
"grad_norm": 14.738792419433594,
"learning_rate": 2.2000009848385107e-05,
"loss": 0.8234,
"step": 1900
},
{
"epoch": 3.64,
"grad_norm": 17.272687911987305,
"learning_rate": 2.12312251950283e-05,
"loss": 0.8101,
"step": 2000
},
{
"epoch": 3.82,
"grad_norm": 19.54085922241211,
"learning_rate": 2.0442115580259615e-05,
"loss": 0.8235,
"step": 2100
},
{
"epoch": 4.0,
"grad_norm": 30.610021591186523,
"learning_rate": 1.963525491562421e-05,
"loss": 0.7621,
"step": 2200
},
{
"epoch": 4.0,
"eval_accuracy": 0.8548707753479126,
"eval_loss": 0.5425807237625122,
"eval_runtime": 49.9135,
"eval_samples_per_second": 50.387,
"eval_steps_per_second": 1.583,
"step": 2200
},
{
"epoch": 4.18,
"grad_norm": 17.56275749206543,
"learning_rate": 1.8813275012902307e-05,
"loss": 0.743,
"step": 2300
},
{
"epoch": 4.36,
"grad_norm": 21.964000701904297,
"learning_rate": 1.7978856999686182e-05,
"loss": 0.7278,
"step": 2400
},
{
"epoch": 4.55,
"grad_norm": 19.241918563842773,
"learning_rate": 1.713472257409928e-05,
"loss": 0.7047,
"step": 2500
},
{
"epoch": 4.73,
"grad_norm": 30.8186092376709,
"learning_rate": 1.6283625127182596e-05,
"loss": 0.7558,
"step": 2600
},
{
"epoch": 4.91,
"grad_norm": 16.47164535522461,
"learning_rate": 1.5428340761905444e-05,
"loss": 0.7506,
"step": 2700
},
{
"epoch": 5.0,
"eval_accuracy": 0.8624254473161034,
"eval_loss": 0.5018457770347595,
"eval_runtime": 49.7681,
"eval_samples_per_second": 50.534,
"eval_steps_per_second": 1.587,
"step": 2750
},
{
"epoch": 5.09,
"grad_norm": 21.695837020874023,
"learning_rate": 1.4571659238094557e-05,
"loss": 0.693,
"step": 2800
},
{
"epoch": 5.27,
"grad_norm": 19.108768463134766,
"learning_rate": 1.3716374872817408e-05,
"loss": 0.6944,
"step": 2900
},
{
"epoch": 5.45,
"grad_norm": 19.964969635009766,
"learning_rate": 1.2865277425900725e-05,
"loss": 0.656,
"step": 3000
},
{
"epoch": 5.64,
"grad_norm": 17.055917739868164,
"learning_rate": 1.2021143000313822e-05,
"loss": 0.6643,
"step": 3100
},
{
"epoch": 5.82,
"grad_norm": 15.315804481506348,
"learning_rate": 1.11867249870977e-05,
"loss": 0.6955,
"step": 3200
},
{
"epoch": 6.0,
"grad_norm": 33.08659744262695,
"learning_rate": 1.036474508437579e-05,
"loss": 0.6774,
"step": 3300
},
{
"epoch": 6.0,
"eval_accuracy": 0.868389662027833,
"eval_loss": 0.4792053699493408,
"eval_runtime": 49.8325,
"eval_samples_per_second": 50.469,
"eval_steps_per_second": 1.585,
"step": 3300
},
{
"epoch": 6.18,
"grad_norm": 20.445402145385742,
"learning_rate": 9.557884419740387e-06,
"loss": 0.6568,
"step": 3400
},
{
"epoch": 6.36,
"grad_norm": 15.636629104614258,
"learning_rate": 8.768774804971705e-06,
"loss": 0.606,
"step": 3500
},
{
"epoch": 6.55,
"grad_norm": 18.987926483154297,
"learning_rate": 7.999990151614895e-06,
"loss": 0.663,
"step": 3600
},
{
"epoch": 6.73,
"grad_norm": 14.477697372436523,
"learning_rate": 7.254038075415573e-06,
"loss": 0.6101,
"step": 3700
},
{
"epoch": 6.91,
"grad_norm": 24.2838191986084,
"learning_rate": 6.533351716998466e-06,
"loss": 0.6364,
"step": 3800
},
{
"epoch": 7.0,
"eval_accuracy": 0.8743538767395627,
"eval_loss": 0.4525967836380005,
"eval_runtime": 49.992,
"eval_samples_per_second": 50.308,
"eval_steps_per_second": 1.58,
"step": 3850
},
{
"epoch": 7.09,
"grad_norm": 23.1962947845459,
"learning_rate": 5.840281805468427e-06,
"loss": 0.5968,
"step": 3900
},
{
"epoch": 7.27,
"grad_norm": 21.290063858032227,
"learning_rate": 5.177088990820725e-06,
"loss": 0.6077,
"step": 4000
},
{
"epoch": 7.45,
"grad_norm": 25.02341079711914,
"learning_rate": 4.5459364701706e-06,
"loss": 0.6175,
"step": 4100
},
{
"epoch": 7.64,
"grad_norm": 21.104934692382812,
"learning_rate": 3.948882931853924e-06,
"loss": 0.5993,
"step": 4200
},
{
"epoch": 7.82,
"grad_norm": 17.36994171142578,
"learning_rate": 3.3878758404137627e-06,
"loss": 0.607,
"step": 4300
},
{
"epoch": 8.0,
"grad_norm": 35.08428955078125,
"learning_rate": 2.86474508437579e-06,
"loss": 0.5961,
"step": 4400
},
{
"epoch": 8.0,
"eval_accuracy": 0.8799204771371769,
"eval_loss": 0.4362061619758606,
"eval_runtime": 49.5582,
"eval_samples_per_second": 50.748,
"eval_steps_per_second": 1.594,
"step": 4400
},
{
"epoch": 8.18,
"grad_norm": 20.05402183532715,
"learning_rate": 2.38119700753228e-06,
"loss": 0.5695,
"step": 4500
},
{
"epoch": 8.36,
"grad_norm": 30.20684242248535,
"learning_rate": 1.9388088432033446e-06,
"loss": 0.5647,
"step": 4600
},
{
"epoch": 8.55,
"grad_norm": 21.600929260253906,
"learning_rate": 1.5390235696296268e-06,
"loss": 0.5637,
"step": 4700
},
{
"epoch": 8.73,
"grad_norm": 32.336669921875,
"learning_rate": 1.1831452032772499e-06,
"loss": 0.5916,
"step": 4800
},
{
"epoch": 8.91,
"grad_norm": 14.266773223876953,
"learning_rate": 8.723345454071308e-07,
"loss": 0.602,
"step": 4900
},
{
"epoch": 9.0,
"eval_accuracy": 0.882703777335984,
"eval_loss": 0.43156278133392334,
"eval_runtime": 50.0221,
"eval_samples_per_second": 50.278,
"eval_steps_per_second": 1.579,
"step": 4950
},
{
"epoch": 9.09,
"grad_norm": 27.497398376464844,
"learning_rate": 6.076053957825411e-07,
"loss": 0.5848,
"step": 5000
},
{
"epoch": 9.27,
"grad_norm": 17.4595890045166,
"learning_rate": 3.8982124586499804e-07,
"loss": 0.5724,
"step": 5100
},
{
"epoch": 9.45,
"grad_norm": 30.596479415893555,
"learning_rate": 2.1969246228460526e-07,
"loss": 0.5712,
"step": 5200
},
{
"epoch": 9.64,
"grad_norm": 17.515758514404297,
"learning_rate": 9.777396977174668e-08,
"loss": 0.5994,
"step": 5300
},
{
"epoch": 9.82,
"grad_norm": 25.090110778808594,
"learning_rate": 2.4463441107965278e-08,
"loss": 0.5685,
"step": 5400
},
{
"epoch": 10.0,
"grad_norm": 22.852954864501953,
"learning_rate": 0.0,
"loss": 0.5896,
"step": 5500
},
{
"epoch": 10.0,
"eval_accuracy": 0.8850894632206759,
"eval_loss": 0.42871516942977905,
"eval_runtime": 49.9143,
"eval_samples_per_second": 50.386,
"eval_steps_per_second": 1.583,
"step": 5500
},
{
"epoch": 10.0,
"step": 5500,
"total_flos": 4.4284716808619213e+18,
"train_loss": 0.8922947484796697,
"train_runtime": 5842.588,
"train_samples_per_second": 30.091,
"train_steps_per_second": 0.941
}
],
"logging_steps": 100,
"max_steps": 5500,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 4.4284716808619213e+18,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}