Training in progress, epoch 150, checkpoint
last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:af610a13989daf4d4b17a3b61eb7f777cd37755a355e75d56787476b2fa10ec2
 size 166496880
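Only the Git LFS pointer changes in this diff; the ~166 MB weights blob itself lives in LFS storage. As a minimal sketch for inspecting the updated weights, assuming the blob has been fetched locally (e.g. via `git lfs pull` or `huggingface_hub`) to the path shown:

```python
from safetensors.torch import load_file

# Hypothetical local path to the fetched checkpoint weights.
state_dict = load_file("last-checkpoint/model.safetensors")
print(len(state_dict), "tensors")
print(sum(t.numel() for t in state_dict.values()), "parameters")
```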
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:71730890587d3a348be1ae44e6a6b83b3fc6370b653f0b28fe7f78b6e17a9bac
 size 330495866
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:2369e82f6a6f2e47a7e47e23a9296d17f755e58759c961f78d5cc3d96e6e1325
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:96205cabb1588b7cd4f382979691c79dabb654b1f45a221ef5567a570af3df90
 size 1064
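All four files above follow the same pattern: each commit rewrites only the three-line LFS pointer (spec version, sha256 oid, byte size). A quick way to confirm that locally fetched blobs match the new pointers, as a sketch in which the local paths are assumptions:

```python
import hashlib
from pathlib import Path

def matches_pointer(local_path: str, oid: str, size: int) -> bool:
    """Return True if the local file has the byte size and sha256 digest
    recorded in its Git LFS pointer."""
    p = Path(local_path)
    if p.stat().st_size != size:
        return False
    h = hashlib.sha256()
    with p.open("rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == oid

# oid/size values are taken from the new pointers in this commit;
# the local paths are assumed download locations.
print(matches_pointer("last-checkpoint/optimizer.pt",
                      "71730890587d3a348be1ae44e6a6b83b3fc6370b653f0b28fe7f78b6e17a9bac",
                      330495866))
print(matches_pointer("last-checkpoint/scheduler.pt",
                      "96205cabb1588b7cd4f382979691c79dabb654b1f45a221ef5567a570af3df90",
                      1064))
```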
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.2800098657608032,
   "best_model_checkpoint": "chickens-composite-201616161616-150-epochs-w-hybrid-transform-metrics-test/checkpoint-54500",
-  "epoch":
+  "epoch": 150.0,
   "eval_steps": 500,
-  "global_step":
+  "global_step": 75000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -21262,6 +21262,151 @@
       "eval_samples_per_second": 13.77,
       "eval_steps_per_second": 1.79,
       "step": 74500
+    },
+    {
+      "epoch": 149.04,
+      "grad_norm": 112.83297729492188,
+      "learning_rate": 1.0106134441850712e-09,
+      "loss": 0.2617,
+      "step": 74520
+    },
+    {
+      "epoch": 149.1,
+      "grad_norm": 50.06736755371094,
+      "learning_rate": 8.88238095955174e-10,
+      "loss": 0.2336,
+      "step": 74550
+    },
+    {
+      "epoch": 149.16,
+      "grad_norm": 63.30166244506836,
+      "learning_rate": 7.737570275573314e-10,
+      "loss": 0.2674,
+      "step": 74580
+    },
+    {
+      "epoch": 149.22,
+      "grad_norm": 39.75635528564453,
+      "learning_rate": 6.671704197735995e-10,
+      "loss": 0.2511,
+      "step": 74610
+    },
+    {
+      "epoch": 149.28,
+      "grad_norm": 29.61418342590332,
+      "learning_rate": 5.684784409182298e-10,
+      "loss": 0.2124,
+      "step": 74640
+    },
+    {
+      "epoch": 149.34,
+      "grad_norm": 57.52811050415039,
+      "learning_rate": 4.776812468398895e-10,
+      "loss": 0.268,
+      "step": 74670
+    },
+    {
+      "epoch": 149.4,
+      "grad_norm": 55.41215133666992,
+      "learning_rate": 3.9477898091944135e-10,
+      "loss": 0.2094,
+      "step": 74700
+    },
+    {
+      "epoch": 149.46,
+      "grad_norm": 29.794158935546875,
+      "learning_rate": 3.1977177407105376e-10,
+      "loss": 0.2564,
+      "step": 74730
+    },
+    {
+      "epoch": 149.52,
+      "grad_norm": 103.24960327148438,
+      "learning_rate": 2.5265974474109054e-10,
+      "loss": 0.2407,
+      "step": 74760
+    },
+    {
+      "epoch": 149.58,
+      "grad_norm": 44.9169921875,
+      "learning_rate": 1.9344299890866614e-10,
+      "loss": 0.2346,
+      "step": 74790
+    },
+    {
+      "epoch": 149.64,
+      "grad_norm": 74.23555755615234,
+      "learning_rate": 1.4212163008509028e-10,
+      "loss": 0.218,
+      "step": 74820
+    },
+    {
+      "epoch": 149.7,
+      "grad_norm": 99.87975311279297,
+      "learning_rate": 9.869571931442334e-11,
+      "loss": 0.2874,
+      "step": 74850
+    },
+    {
+      "epoch": 149.76,
+      "grad_norm": 54.385589599609375,
+      "learning_rate": 6.316533517125578e-11,
+      "loss": 0.2815,
+      "step": 74880
+    },
+    {
+      "epoch": 149.82,
+      "grad_norm": 38.39406204223633,
+      "learning_rate": 3.55305337634837e-11,
+      "loss": 0.2368,
+      "step": 74910
+    },
+    {
+      "epoch": 149.88,
+      "grad_norm": 60.73610305786133,
+      "learning_rate": 1.57913587295333e-11,
+      "loss": 0.2144,
+      "step": 74940
+    },
+    {
+      "epoch": 149.94,
+      "grad_norm": 22.630802154541016,
+      "learning_rate": 3.947841241136452e-12,
+      "loss": 0.273,
+      "step": 74970
+    },
+    {
+      "epoch": 150.0,
+      "grad_norm": 25.250965118408203,
+      "learning_rate": 0.0,
+      "loss": 0.2683,
+      "step": 75000
+    },
+    {
+      "epoch": 150.0,
+      "eval_loss": 0.2826327085494995,
+      "eval_map": 0.8176,
+      "eval_map_50": 0.9579,
+      "eval_map_75": 0.9306,
+      "eval_map_chicken": 0.8172,
+      "eval_map_duck": 0.7548,
+      "eval_map_large": 0.8195,
+      "eval_map_medium": 0.8219,
+      "eval_map_plant": 0.8807,
+      "eval_map_small": 0.3875,
+      "eval_mar_1": 0.326,
+      "eval_mar_10": 0.8548,
+      "eval_mar_100": 0.8598,
+      "eval_mar_100_chicken": 0.8631,
+      "eval_mar_100_duck": 0.8103,
+      "eval_mar_100_plant": 0.9061,
+      "eval_mar_large": 0.8516,
+      "eval_mar_medium": 0.8639,
+      "eval_mar_small": 0.5,
+      "eval_runtime": 7.0466,
+      "eval_samples_per_second": 14.191,
+      "eval_steps_per_second": 1.845,
+      "step": 75000
     }
   ],
   "logging_steps": 30,
@@ -21276,12 +21421,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop":
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 2.
+  "total_flos": 2.580173346816e+19,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
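The commit extends the trainer state to the final step (epoch 150.0, global step 75000) and marks training as stopped. As a small sketch for inspecting the updated state, assuming the standard transformers Trainer layout in which the per-step records shown above live under the "log_history" key, and using a hypothetical local path:

```python
import json

# Hypothetical local path; adjust to wherever the checkpoint was downloaded.
with open("last-checkpoint/trainer_state.json") as fh:
    state = json.load(fh)

print("epoch:", state["epoch"])               # 150.0 after this commit
print("global_step:", state["global_step"])   # 75000
print("best_metric:", state["best_metric"])   # 0.2800... (from checkpoint-54500)

# Split the records into training-loss points and evaluation points.
train_points = [r for r in state["log_history"] if "loss" in r]
eval_points = [r for r in state["log_history"] if "eval_loss" in r]

print("last train loss:", train_points[-1]["loss"])        # 0.2683 at step 75000
print("final eval mAP:", eval_points[-1].get("eval_map"))  # 0.8176 at step 75000
```

If training were to be continued instead, the whole `last-checkpoint/` directory can be passed to `Trainer.train(resume_from_checkpoint=...)`, which restores the weights, optimizer, scheduler, and RNG state saved in the files above.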