{
"best_metric": 0.894109396914446,
"best_model_checkpoint": "swin-tiny-patch4-window7-224-classification/checkpoint-1000",
"epoch": 9.975062344139651,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1,
"grad_norm": 5.313328742980957,
"learning_rate": 1e-05,
"loss": 2.0631,
"step": 10
},
{
"epoch": 0.2,
"grad_norm": 5.775210857391357,
"learning_rate": 2e-05,
"loss": 1.7746,
"step": 20
},
{
"epoch": 0.3,
"grad_norm": 4.816771984100342,
"learning_rate": 3e-05,
"loss": 1.5563,
"step": 30
},
{
"epoch": 0.4,
"grad_norm": 6.007688999176025,
"learning_rate": 4e-05,
"loss": 1.2361,
"step": 40
},
{
"epoch": 0.5,
"grad_norm": 7.807107925415039,
"learning_rate": 5e-05,
"loss": 1.0685,
"step": 50
},
{
"epoch": 0.6,
"grad_norm": 6.920202732086182,
"learning_rate": 6e-05,
"loss": 0.9716,
"step": 60
},
{
"epoch": 0.7,
"grad_norm": 5.678104877471924,
"learning_rate": 7e-05,
"loss": 0.9274,
"step": 70
},
{
"epoch": 0.8,
"grad_norm": 7.1128010749816895,
"learning_rate": 8e-05,
"loss": 0.8962,
"step": 80
},
{
"epoch": 0.9,
"grad_norm": 8.700326919555664,
"learning_rate": 9e-05,
"loss": 0.8924,
"step": 90
},
{
"epoch": 1.0,
"grad_norm": 7.496997833251953,
"learning_rate": 0.0001,
"loss": 0.8514,
"step": 100
},
{
"epoch": 1.0,
"eval_accuracy": 0.705820476858345,
"eval_loss": 0.7862311005592346,
"eval_runtime": 38.0587,
"eval_samples_per_second": 74.937,
"eval_steps_per_second": 1.182,
"step": 100
},
{
"epoch": 1.1,
"grad_norm": 5.842683792114258,
"learning_rate": 9.888888888888889e-05,
"loss": 0.7961,
"step": 110
},
{
"epoch": 1.2,
"grad_norm": 7.082647323608398,
"learning_rate": 9.777777777777778e-05,
"loss": 0.8179,
"step": 120
},
{
"epoch": 1.3,
"grad_norm": 6.531412601470947,
"learning_rate": 9.666666666666667e-05,
"loss": 0.772,
"step": 130
},
{
"epoch": 1.4,
"grad_norm": 4.981847763061523,
"learning_rate": 9.555555555555557e-05,
"loss": 0.7298,
"step": 140
},
{
"epoch": 1.5,
"grad_norm": 6.9995198249816895,
"learning_rate": 9.444444444444444e-05,
"loss": 0.6882,
"step": 150
},
{
"epoch": 1.6,
"grad_norm": 4.9766693115234375,
"learning_rate": 9.333333333333334e-05,
"loss": 0.7048,
"step": 160
},
{
"epoch": 1.7,
"grad_norm": 5.899988174438477,
"learning_rate": 9.222222222222223e-05,
"loss": 0.7254,
"step": 170
},
{
"epoch": 1.8,
"grad_norm": 4.040374755859375,
"learning_rate": 9.111111111111112e-05,
"loss": 0.6543,
"step": 180
},
{
"epoch": 1.9,
"grad_norm": 6.156548023223877,
"learning_rate": 9e-05,
"loss": 0.6329,
"step": 190
},
{
"epoch": 2.0,
"grad_norm": 6.268504619598389,
"learning_rate": 8.888888888888889e-05,
"loss": 0.631,
"step": 200
},
{
"epoch": 2.0,
"eval_accuracy": 0.7885694249649369,
"eval_loss": 0.5491923093795776,
"eval_runtime": 38.032,
"eval_samples_per_second": 74.989,
"eval_steps_per_second": 1.183,
"step": 200
},
{
"epoch": 2.09,
"grad_norm": 6.425785541534424,
"learning_rate": 8.777777777777778e-05,
"loss": 0.612,
"step": 210
},
{
"epoch": 2.19,
"grad_norm": 5.754735469818115,
"learning_rate": 8.666666666666667e-05,
"loss": 0.6117,
"step": 220
},
{
"epoch": 2.29,
"grad_norm": 5.51466703414917,
"learning_rate": 8.555555555555556e-05,
"loss": 0.6041,
"step": 230
},
{
"epoch": 2.39,
"grad_norm": 5.170792102813721,
"learning_rate": 8.444444444444444e-05,
"loss": 0.6057,
"step": 240
},
{
"epoch": 2.49,
"grad_norm": 5.319341659545898,
"learning_rate": 8.333333333333334e-05,
"loss": 0.556,
"step": 250
},
{
"epoch": 2.59,
"grad_norm": 7.033453941345215,
"learning_rate": 8.222222222222222e-05,
"loss": 0.5918,
"step": 260
},
{
"epoch": 2.69,
"grad_norm": 6.37441349029541,
"learning_rate": 8.111111111111112e-05,
"loss": 0.5603,
"step": 270
},
{
"epoch": 2.79,
"grad_norm": 4.926444053649902,
"learning_rate": 8e-05,
"loss": 0.5827,
"step": 280
},
{
"epoch": 2.89,
"grad_norm": 4.273934841156006,
"learning_rate": 7.88888888888889e-05,
"loss": 0.5406,
"step": 290
},
{
"epoch": 2.99,
"grad_norm": 5.528937816619873,
"learning_rate": 7.777777777777778e-05,
"loss": 0.5516,
"step": 300
},
{
"epoch": 2.99,
"eval_accuracy": 0.8078541374474053,
"eval_loss": 0.5194684863090515,
"eval_runtime": 38.7637,
"eval_samples_per_second": 73.574,
"eval_steps_per_second": 1.161,
"step": 300
},
{
"epoch": 3.09,
"grad_norm": 9.39556884765625,
"learning_rate": 7.666666666666667e-05,
"loss": 0.5039,
"step": 310
},
{
"epoch": 3.19,
"grad_norm": 5.658567428588867,
"learning_rate": 7.555555555555556e-05,
"loss": 0.497,
"step": 320
},
{
"epoch": 3.29,
"grad_norm": 5.974184513092041,
"learning_rate": 7.444444444444444e-05,
"loss": 0.5041,
"step": 330
},
{
"epoch": 3.39,
"grad_norm": 5.876557350158691,
"learning_rate": 7.333333333333333e-05,
"loss": 0.4978,
"step": 340
},
{
"epoch": 3.49,
"grad_norm": 7.005821704864502,
"learning_rate": 7.222222222222222e-05,
"loss": 0.5145,
"step": 350
},
{
"epoch": 3.59,
"grad_norm": 4.967439651489258,
"learning_rate": 7.111111111111112e-05,
"loss": 0.4803,
"step": 360
},
{
"epoch": 3.69,
"grad_norm": 4.976831436157227,
"learning_rate": 7e-05,
"loss": 0.4816,
"step": 370
},
{
"epoch": 3.79,
"grad_norm": 6.872544288635254,
"learning_rate": 6.88888888888889e-05,
"loss": 0.4811,
"step": 380
},
{
"epoch": 3.89,
"grad_norm": 5.97269868850708,
"learning_rate": 6.777777777777778e-05,
"loss": 0.4978,
"step": 390
},
{
"epoch": 3.99,
"grad_norm": 5.982359886169434,
"learning_rate": 6.666666666666667e-05,
"loss": 0.4597,
"step": 400
},
{
"epoch": 4.0,
"eval_accuracy": 0.8229312762973352,
"eval_loss": 0.48398053646087646,
"eval_runtime": 38.5586,
"eval_samples_per_second": 73.965,
"eval_steps_per_second": 1.167,
"step": 401
},
{
"epoch": 4.09,
"grad_norm": 4.751348972320557,
"learning_rate": 6.555555555555556e-05,
"loss": 0.4443,
"step": 410
},
{
"epoch": 4.19,
"grad_norm": 4.549768447875977,
"learning_rate": 6.444444444444446e-05,
"loss": 0.423,
"step": 420
},
{
"epoch": 4.29,
"grad_norm": 6.269441604614258,
"learning_rate": 6.333333333333333e-05,
"loss": 0.449,
"step": 430
},
{
"epoch": 4.39,
"grad_norm": 4.581325531005859,
"learning_rate": 6.222222222222222e-05,
"loss": 0.4099,
"step": 440
},
{
"epoch": 4.49,
"grad_norm": 5.31665563583374,
"learning_rate": 6.111111111111112e-05,
"loss": 0.4075,
"step": 450
},
{
"epoch": 4.59,
"grad_norm": 5.4510416984558105,
"learning_rate": 6e-05,
"loss": 0.4195,
"step": 460
},
{
"epoch": 4.69,
"grad_norm": 5.312283515930176,
"learning_rate": 5.8888888888888896e-05,
"loss": 0.3829,
"step": 470
},
{
"epoch": 4.79,
"grad_norm": 5.865201950073242,
"learning_rate": 5.7777777777777776e-05,
"loss": 0.4209,
"step": 480
},
{
"epoch": 4.89,
"grad_norm": 5.123416423797607,
"learning_rate": 5.666666666666667e-05,
"loss": 0.4094,
"step": 490
},
{
"epoch": 4.99,
"grad_norm": 4.805253982543945,
"learning_rate": 5.555555555555556e-05,
"loss": 0.4094,
"step": 500
},
{
"epoch": 5.0,
"eval_accuracy": 0.8597475455820477,
"eval_loss": 0.3945947587490082,
"eval_runtime": 38.9464,
"eval_samples_per_second": 73.229,
"eval_steps_per_second": 1.155,
"step": 501
},
{
"epoch": 5.09,
"grad_norm": 4.796308517456055,
"learning_rate": 5.4444444444444446e-05,
"loss": 0.3555,
"step": 510
},
{
"epoch": 5.19,
"grad_norm": 4.460272789001465,
"learning_rate": 5.333333333333333e-05,
"loss": 0.3719,
"step": 520
},
{
"epoch": 5.29,
"grad_norm": 5.035310745239258,
"learning_rate": 5.222222222222223e-05,
"loss": 0.3785,
"step": 530
},
{
"epoch": 5.39,
"grad_norm": 5.145148754119873,
"learning_rate": 5.111111111111111e-05,
"loss": 0.3881,
"step": 540
},
{
"epoch": 5.49,
"grad_norm": 6.646022319793701,
"learning_rate": 5e-05,
"loss": 0.3776,
"step": 550
},
{
"epoch": 5.59,
"grad_norm": 6.039920330047607,
"learning_rate": 4.888888888888889e-05,
"loss": 0.3891,
"step": 560
},
{
"epoch": 5.69,
"grad_norm": 4.1056623458862305,
"learning_rate": 4.7777777777777784e-05,
"loss": 0.3369,
"step": 570
},
{
"epoch": 5.79,
"grad_norm": 5.633683204650879,
"learning_rate": 4.666666666666667e-05,
"loss": 0.3606,
"step": 580
},
{
"epoch": 5.89,
"grad_norm": 5.873307704925537,
"learning_rate": 4.555555555555556e-05,
"loss": 0.3386,
"step": 590
},
{
"epoch": 5.99,
"grad_norm": 5.3684983253479,
"learning_rate": 4.4444444444444447e-05,
"loss": 0.3289,
"step": 600
},
{
"epoch": 6.0,
"eval_accuracy": 0.8572931276297335,
"eval_loss": 0.3786664605140686,
"eval_runtime": 40.3623,
"eval_samples_per_second": 70.66,
"eval_steps_per_second": 1.115,
"step": 601
},
{
"epoch": 6.08,
"grad_norm": 4.093196392059326,
"learning_rate": 4.3333333333333334e-05,
"loss": 0.3251,
"step": 610
},
{
"epoch": 6.18,
"grad_norm": 4.739712715148926,
"learning_rate": 4.222222222222222e-05,
"loss": 0.3029,
"step": 620
},
{
"epoch": 6.28,
"grad_norm": 3.9042539596557617,
"learning_rate": 4.111111111111111e-05,
"loss": 0.3269,
"step": 630
},
{
"epoch": 6.38,
"grad_norm": 5.695065498352051,
"learning_rate": 4e-05,
"loss": 0.3318,
"step": 640
},
{
"epoch": 6.48,
"grad_norm": 4.814879894256592,
"learning_rate": 3.888888888888889e-05,
"loss": 0.3087,
"step": 650
},
{
"epoch": 6.58,
"grad_norm": 4.633285999298096,
"learning_rate": 3.777777777777778e-05,
"loss": 0.3416,
"step": 660
},
{
"epoch": 6.68,
"grad_norm": 4.862430095672607,
"learning_rate": 3.6666666666666666e-05,
"loss": 0.3063,
"step": 670
},
{
"epoch": 6.78,
"grad_norm": 4.930345058441162,
"learning_rate": 3.555555555555556e-05,
"loss": 0.3225,
"step": 680
},
{
"epoch": 6.88,
"grad_norm": 4.587200164794922,
"learning_rate": 3.444444444444445e-05,
"loss": 0.3139,
"step": 690
},
{
"epoch": 6.98,
"grad_norm": 5.570033550262451,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.3145,
"step": 700
},
{
"epoch": 6.99,
"eval_accuracy": 0.8772791023842917,
"eval_loss": 0.3536257743835449,
"eval_runtime": 42.7073,
"eval_samples_per_second": 66.78,
"eval_steps_per_second": 1.054,
"step": 701
},
{
"epoch": 7.08,
"grad_norm": 5.562831401824951,
"learning_rate": 3.222222222222223e-05,
"loss": 0.2998,
"step": 710
},
{
"epoch": 7.18,
"grad_norm": 5.306057453155518,
"learning_rate": 3.111111111111111e-05,
"loss": 0.2807,
"step": 720
},
{
"epoch": 7.28,
"grad_norm": 5.671096324920654,
"learning_rate": 3e-05,
"loss": 0.2794,
"step": 730
},
{
"epoch": 7.38,
"grad_norm": 5.412108421325684,
"learning_rate": 2.8888888888888888e-05,
"loss": 0.2814,
"step": 740
},
{
"epoch": 7.48,
"grad_norm": 4.249139308929443,
"learning_rate": 2.777777777777778e-05,
"loss": 0.2531,
"step": 750
},
{
"epoch": 7.58,
"grad_norm": 5.714820861816406,
"learning_rate": 2.6666666666666667e-05,
"loss": 0.2712,
"step": 760
},
{
"epoch": 7.68,
"grad_norm": 5.9426469802856445,
"learning_rate": 2.5555555555555554e-05,
"loss": 0.2589,
"step": 770
},
{
"epoch": 7.78,
"grad_norm": 4.787610054016113,
"learning_rate": 2.4444444444444445e-05,
"loss": 0.2745,
"step": 780
},
{
"epoch": 7.88,
"grad_norm": 3.9021761417388916,
"learning_rate": 2.3333333333333336e-05,
"loss": 0.2728,
"step": 790
},
{
"epoch": 7.98,
"grad_norm": 5.153603553771973,
"learning_rate": 2.2222222222222223e-05,
"loss": 0.2911,
"step": 800
},
{
"epoch": 8.0,
"eval_accuracy": 0.885343618513324,
"eval_loss": 0.32486388087272644,
"eval_runtime": 42.9219,
"eval_samples_per_second": 66.446,
"eval_steps_per_second": 1.048,
"step": 802
},
{
"epoch": 8.08,
"grad_norm": 5.644151210784912,
"learning_rate": 2.111111111111111e-05,
"loss": 0.2559,
"step": 810
},
{
"epoch": 8.18,
"grad_norm": 4.023806095123291,
"learning_rate": 2e-05,
"loss": 0.2415,
"step": 820
},
{
"epoch": 8.28,
"grad_norm": 6.214153289794922,
"learning_rate": 1.888888888888889e-05,
"loss": 0.2447,
"step": 830
},
{
"epoch": 8.38,
"grad_norm": 3.903085231781006,
"learning_rate": 1.777777777777778e-05,
"loss": 0.2602,
"step": 840
},
{
"epoch": 8.48,
"grad_norm": 5.1828131675720215,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.2529,
"step": 850
},
{
"epoch": 8.58,
"grad_norm": 4.840915203094482,
"learning_rate": 1.5555555555555555e-05,
"loss": 0.2302,
"step": 860
},
{
"epoch": 8.68,
"grad_norm": 5.424919128417969,
"learning_rate": 1.4444444444444444e-05,
"loss": 0.246,
"step": 870
},
{
"epoch": 8.78,
"grad_norm": 6.8232316970825195,
"learning_rate": 1.3333333333333333e-05,
"loss": 0.2435,
"step": 880
},
{
"epoch": 8.88,
"grad_norm": 4.856207847595215,
"learning_rate": 1.2222222222222222e-05,
"loss": 0.2376,
"step": 890
},
{
"epoch": 8.98,
"grad_norm": 5.239475250244141,
"learning_rate": 1.1111111111111112e-05,
"loss": 0.2487,
"step": 900
},
{
"epoch": 9.0,
"eval_accuracy": 0.8937587657784011,
"eval_loss": 0.30939263105392456,
"eval_runtime": 39.5102,
"eval_samples_per_second": 72.184,
"eval_steps_per_second": 1.139,
"step": 902
},
{
"epoch": 9.08,
"grad_norm": 5.607595443725586,
"learning_rate": 1e-05,
"loss": 0.2253,
"step": 910
},
{
"epoch": 9.18,
"grad_norm": 3.9586544036865234,
"learning_rate": 8.88888888888889e-06,
"loss": 0.2478,
"step": 920
},
{
"epoch": 9.28,
"grad_norm": 3.8814537525177,
"learning_rate": 7.777777777777777e-06,
"loss": 0.2173,
"step": 930
},
{
"epoch": 9.38,
"grad_norm": 7.456895351409912,
"learning_rate": 6.666666666666667e-06,
"loss": 0.2315,
"step": 940
},
{
"epoch": 9.48,
"grad_norm": 4.543576717376709,
"learning_rate": 5.555555555555556e-06,
"loss": 0.2281,
"step": 950
},
{
"epoch": 9.58,
"grad_norm": 4.659777641296387,
"learning_rate": 4.444444444444445e-06,
"loss": 0.2324,
"step": 960
},
{
"epoch": 9.68,
"grad_norm": 4.048426151275635,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.2132,
"step": 970
},
{
"epoch": 9.78,
"grad_norm": 3.969958543777466,
"learning_rate": 2.2222222222222225e-06,
"loss": 0.2076,
"step": 980
},
{
"epoch": 9.88,
"grad_norm": 5.621995449066162,
"learning_rate": 1.1111111111111112e-06,
"loss": 0.2262,
"step": 990
},
{
"epoch": 9.98,
"grad_norm": 4.668450355529785,
"learning_rate": 0.0,
"loss": 0.2291,
"step": 1000
},
{
"epoch": 9.98,
"eval_accuracy": 0.894109396914446,
"eval_loss": 0.303189218044281,
"eval_runtime": 39.0907,
"eval_samples_per_second": 72.958,
"eval_steps_per_second": 1.151,
"step": 1000
},
{
"epoch": 9.98,
"step": 1000,
"total_flos": 6.364199987970048e+18,
"train_loss": 0.48602974390983583,
"train_runtime": 5351.8475,
"train_samples_per_second": 47.954,
"train_steps_per_second": 0.187
}
],
"logging_steps": 10,
"max_steps": 1000,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 6.364199987970048e+18,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}