ben81828 committed · verified
Commit e9bc7d9 · 1 Parent(s): 5b8ef6e

Training in progress, step 2500, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:20c5d0122589f7af19ec90b59ba30300ed4adb6c4fe5dd76d6539c3ea636594d
+ oid sha256:51ba4cb036f96637ea50997962c817be8981b7c185736e25bcaea373e2187935
  size 29034840
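
The files in this commit are stored as Git LFS pointers: each diff shows only the pointer (spec version, sha256 oid of the real blob, and its byte size), not the binary itself. A minimal sketch for checking a downloaded blob against the new oid recorded above; the local path is an assumption based on the file names in this diff, not part of the commit:

```python
# Sketch: verify a locally downloaded checkpoint file against the sha256 oid
# from its Git LFS pointer. Path and expected oid are taken from this diff.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "51ba4cb036f96637ea50997962c817be8981b7c185736e25bcaea373e2187935"
local = sha256_of("last-checkpoint/adapter_model.safetensors")  # assumed local path
print("match" if local == expected else f"mismatch: {local}")
```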
last-checkpoint/global_step2500/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5dd56c8430f9624cb4cd6078c475d6656eb588c42552315d1be5f715dd91147f
+ size 43429616
last-checkpoint/global_step2500/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2359ffaa177c733e5a3ab724865ad8c9288b308254c24c08aa8822587b877089
+ size 43429616
last-checkpoint/global_step2500/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b0c116fdc0341a1f6bce86f8b04b43fd463ba0f0fe1f4b51b29e089c1b995cf
+ size 43429616
last-checkpoint/global_step2500/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfe96322a639dfd48107cacc8fe39f35e0c82f92328944532a672b5d4cc95750
+ size 43429616
last-checkpoint/global_step2500/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4382f7f393e0176141baa6d4189c9d33a0c795a3103569af04a09146741b23c
+ size 637299
last-checkpoint/global_step2500/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3b7f2c2a48b2f065e03436109a62bdff8c72dadcad153c609e970c4f78b7f43
+ size 637171
last-checkpoint/global_step2500/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a15e793a3acc8582fc05f1fcd6192eb44d9139e6431ec364cdc8d651eeb43294
+ size 637171
last-checkpoint/global_step2500/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c0dfd84758f3e6c94c6db5c547aaf10008e41d961554cf2727d4835f2b88425
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step2450
+ global_step2500
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6b8aff7a1897a7eaf48c78ea1f8115c061edfa2b6fa42280e2c1c58fe66b1f8a
+ oid sha256:a97c73c15a2a5b2de7dc426a700b2053aee43809425431c513cc5e3aab6c2107
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1d73a7524f07999ef35d5d9b107dcc1678eae2ada841644e1bd00ec0734368c2
+ oid sha256:1296b339c1b16ab7e14352a269004d20ede428aef748283fb0a6650d62f58129
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:149a2ed30e88bf94d622f8d7693f382286a49ac536a3f63efc50cab63f6b9f39
+ oid sha256:000b1637f5e73170f2337500a6a083df3a43d967d642b6c3a68f60deb6c3b960
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:18f12c1b5aae2b7d4bb968649839fc7ff1ce6131508baad4b633693b04cee910
+ oid sha256:b8bcb6e7802f6d888bc099642911087298cfb1adf7053a2d43a67192a53404ef
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b84408c95ab80e030217b567f3dab92b95dbe55b4cee205c514e2dbc80b1b37f
+ oid sha256:be10edf7c6be9f43df78beaad8c1006d912dae1be4452d19b94e400653092195
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.1869634985923767,
  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-2350",
- "epoch": 0.723781388478582,
+ "epoch": 0.7385524372230429,
  "eval_steps": 50,
- "global_step": 2450,
+ "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -4368,11 +4368,100 @@
  "eval_steps_per_second": 0.786,
  "num_input_tokens_seen": 25439896,
  "step": 2450
+ },
+ {
+ "epoch": 0.725258493353028,
+ "grad_norm": 15.826053352114771,
+ "learning_rate": 7.558293193995394e-05,
+ "loss": 0.2752,
+ "num_input_tokens_seen": 25491160,
+ "step": 2455
+ },
+ {
+ "epoch": 0.7267355982274741,
+ "grad_norm": 1.401909903248283,
+ "learning_rate": 7.547792565351075e-05,
+ "loss": 0.2399,
+ "num_input_tokens_seen": 25543152,
+ "step": 2460
+ },
+ {
+ "epoch": 0.7282127031019202,
+ "grad_norm": 0.8017657229162621,
+ "learning_rate": 7.537276736601864e-05,
+ "loss": 0.2351,
+ "num_input_tokens_seen": 25595312,
+ "step": 2465
+ },
+ {
+ "epoch": 0.7296898079763663,
+ "grad_norm": 1.4744204191665158,
+ "learning_rate": 7.526745770485088e-05,
+ "loss": 0.1837,
+ "num_input_tokens_seen": 25648680,
+ "step": 2470
+ },
+ {
+ "epoch": 0.7311669128508124,
+ "grad_norm": 1.2465650859091382,
+ "learning_rate": 7.516199729828385e-05,
+ "loss": 0.3093,
+ "num_input_tokens_seen": 25701464,
+ "step": 2475
+ },
+ {
+ "epoch": 0.7326440177252584,
+ "grad_norm": 1.880567862109699,
+ "learning_rate": 7.505638677549327e-05,
+ "loss": 0.223,
+ "num_input_tokens_seen": 25753528,
+ "step": 2480
+ },
+ {
+ "epoch": 0.7341211225997046,
+ "grad_norm": 0.6866693023387563,
+ "learning_rate": 7.495062676655049e-05,
+ "loss": 0.2128,
+ "num_input_tokens_seen": 25805768,
+ "step": 2485
+ },
+ {
+ "epoch": 0.7355982274741507,
+ "grad_norm": 1.1016249736691914,
+ "learning_rate": 7.484471790241865e-05,
+ "loss": 0.2703,
+ "num_input_tokens_seen": 25856672,
+ "step": 2490
+ },
+ {
+ "epoch": 0.7370753323485968,
+ "grad_norm": 11.244379366241214,
+ "learning_rate": 7.473866081494896e-05,
+ "loss": 0.2456,
+ "num_input_tokens_seen": 25908544,
+ "step": 2495
+ },
+ {
+ "epoch": 0.7385524372230429,
+ "grad_norm": 1.4228671338565775,
+ "learning_rate": 7.463245613687695e-05,
+ "loss": 0.2382,
+ "num_input_tokens_seen": 25961056,
+ "step": 2500
+ },
+ {
+ "epoch": 0.7385524372230429,
+ "eval_loss": 0.2542795240879059,
+ "eval_runtime": 19.1344,
+ "eval_samples_per_second": 3.136,
+ "eval_steps_per_second": 0.784,
+ "num_input_tokens_seen": 25961056,
+ "step": 2500
  }
  ],
  "logging_steps": 5,
  "max_steps": 6770,
- "num_input_tokens_seen": 25439896,
+ "num_input_tokens_seen": 25961056,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -4387,7 +4476,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1678342974603264.0,
+ "total_flos": 1712735614402560.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
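
The updated trainer_state.json carries the full log_history, so the entries appended in this commit (training steps 2455-2500 and the eval at step 2500) can be inspected directly. A minimal sketch, assuming the checkpoint directory has been pulled locally; the path is an assumption, not part of the commit:

```python
# Sketch: read the trainer_state.json updated in this commit and print the
# newest log_history entries (field names match the diff above).
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"], state["epoch"])  # 2500, ~0.7386
for entry in state["log_history"][-3:]:      # last few logged steps
    step = entry.get("step")
    loss = entry.get("loss", entry.get("eval_loss"))
    print(f"step {step}: loss {loss}")
```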