ben81828 committed on
Commit dd2418d (verified) · 1 Parent(s): cb17d66

Training in progress, step 800, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:acaedc144367149e2a92be2a9c0e75df4817558ae00e175c156fbc72ea2ce596
+oid sha256:f6287ca2ed8e9ca31735e5d987ece6f9b430689719f525a3a04c74ff709108ab
 size 29034840
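
The adapter weights live in Git LFS, so the diff above only swaps the pointer's sha256 object id; the binary keeps the same size (29034840 bytes). A minimal sketch, assuming the repository has been cloned and the LFS objects fetched (git lfs pull), for checking the downloaded file against its pointer:

import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    # Stream in 1 MiB chunks so large checkpoint files need not fit in memory.
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected values copied from the LFS pointer above; the local path is an
# assumption about where the repository was checked out.
blob = Path("last-checkpoint/adapter_model.safetensors")
assert blob.stat().st_size == 29034840, "size does not match the LFS pointer"
assert sha256_of(blob) == "f6287ca2ed8e9ca31735e5d987ece6f9b430689719f525a3a04c74ff709108ab", \
    "sha256 does not match the LFS pointer"
print("adapter_model.safetensors matches its LFS pointer")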
last-checkpoint/global_step800/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:811cbafedd82a09164ddba198d6c34b0008bf0d647a089b1c2872a336e918a13
+size 43429616
last-checkpoint/global_step800/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:329da58e74091db53383195c8a404a66ede49ce2e0d0304911e39d4d52c34f6f
+size 43429616
last-checkpoint/global_step800/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e48758f8cd6287cac9729db7c92d3c5b7e5b730e6efd95ccc96f45959761f30
+size 43429616
last-checkpoint/global_step800/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d67ee88b9ec9a55af37e69be5980321b835195cf4becc47caa8bc10c18c26498
+size 43429616
last-checkpoint/global_step800/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33467f0cec03b4779dee333111c3bd03a22372ddf3a2a795922f0ea80db2d9e0
+size 637299
last-checkpoint/global_step800/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cff6501f8b06265046ed0183476dba0c748bb06dda09c2b6e873c247c05ea43d
+size 637171
last-checkpoint/global_step800/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0768848701c35a91de3ad97687565841b9f4771f8101fdde7bedbe1ebcbc9f3
+size 637171
last-checkpoint/global_step800/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4416b108bf4bd22e915b38c44cb62e05b07e52df22336b642b6cf6d7761cbad4
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step750
+global_step800
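
The global_step800 files added above are DeepSpeed ZeRO shards (one bf16 optimizer-state file and one model-state file per data-parallel rank), and the latest file is the tag DeepSpeed consults when resuming, hence its change from global_step750 to global_step800. A minimal sketch, assuming deepspeed is installed and last-checkpoint has been downloaded locally, of consolidating the sharded state into a single fp32 state dict:

import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# The tag defaults to whatever the "latest" file names (global_step800 here);
# passing it explicitly just makes the intent obvious. The local path is an
# assumption about where the checkpoint was downloaded.
checkpoint_dir = "last-checkpoint"
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag="global_step800")

# Save the consolidated weights for use outside DeepSpeed.
torch.save(state_dict, "global_step800_fp32_state_dict.pt")
print(f"consolidated {len(state_dict)} tensors")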
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:66b4ef73f3603a1b91082ee108fa8299ebe45fb3cdeec7d0bdca1982af5bf07d
+oid sha256:b4f003069486a57c6ac033f30cf4c4213eb6b7d659bab68a5a50fdb8da7c4118
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:882eccb2a272cf97cd645050bd077c971e48e78584f717a1b1cc9b5f1c9326dc
+oid sha256:a016ef89b4392d083b2c15a7cf06a39bc61a759f648cf6dc03f1c32b89a526aa
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1626bac54f5806a604b7efdd749c5b65d63bbb40fc55c3744aae6130aa24f3de
+oid sha256:9b56fe0893036dc052d18d90feba4328b90ea71561942150b07406ac3d7a700e
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:40cf83ff997228172cf0b991f9d5209728ccf2f0a75841db5e31e647779a1ad2
+oid sha256:c0c203d12c2c308dab785ed672c9ca27fb6a2f72acd1e1552d1516c7b0006013
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a2ccf9d8c4b5840071603429d56208abefd14e276e9351f1160a607485df78ae
+oid sha256:5d85b710a5709549c0b4daddcc052f2ed242a5d916ac9ca030c805e7ff501c88
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.0033526704646646976,
   "best_model_checkpoint": "saves/CADICA_qwenvl_direction_scale4/lora/sft/checkpoint-550",
-  "epoch": 0.38629925315477726,
+  "epoch": 0.41205253669842906,
   "eval_steps": 50,
-  "global_step": 750,
+  "global_step": 800,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1342,11 +1342,100 @@
       "eval_steps_per_second": 0.773,
       "num_input_tokens_seen": 7488000,
       "step": 750
+    },
+    {
+      "epoch": 0.3888745815091424,
+      "grad_norm": 3.511127189174474,
+      "learning_rate": 9.21223287233121e-05,
+      "loss": 0.0426,
+      "num_input_tokens_seen": 7537920,
+      "step": 755
+    },
+    {
+      "epoch": 0.3914499098635076,
+      "grad_norm": 2.858952603531337,
+      "learning_rate": 9.199082286279622e-05,
+      "loss": 0.0094,
+      "num_input_tokens_seen": 7587840,
+      "step": 760
+    },
+    {
+      "epoch": 0.3940252382178728,
+      "grad_norm": 1.9568584051828664,
+      "learning_rate": 9.185832391312644e-05,
+      "loss": 0.008,
+      "num_input_tokens_seen": 7637760,
+      "step": 765
+    },
+    {
+      "epoch": 0.39660056657223797,
+      "grad_norm": 1.7485145115139782,
+      "learning_rate": 9.172483500792244e-05,
+      "loss": 0.0297,
+      "num_input_tokens_seen": 7687680,
+      "step": 770
+    },
+    {
+      "epoch": 0.39917589492660316,
+      "grad_norm": 0.05161945290023201,
+      "learning_rate": 9.159035930421658e-05,
+      "loss": 0.0239,
+      "num_input_tokens_seen": 7737600,
+      "step": 775
+    },
+    {
+      "epoch": 0.40175122328096835,
+      "grad_norm": 5.343080934932771,
+      "learning_rate": 9.145489998237902e-05,
+      "loss": 0.0162,
+      "num_input_tokens_seen": 7787520,
+      "step": 780
+    },
+    {
+      "epoch": 0.4043265516353335,
+      "grad_norm": 0.020924688334471742,
+      "learning_rate": 9.131846024604274e-05,
+      "loss": 0.0309,
+      "num_input_tokens_seen": 7837440,
+      "step": 785
+    },
+    {
+      "epoch": 0.4069018799896987,
+      "grad_norm": 0.5607043400469908,
+      "learning_rate": 9.11810433220276e-05,
+      "loss": 0.0432,
+      "num_input_tokens_seen": 7887360,
+      "step": 790
+    },
+    {
+      "epoch": 0.40947720834406387,
+      "grad_norm": 0.05221845282486231,
+      "learning_rate": 9.104265246026415e-05,
+      "loss": 0.0022,
+      "num_input_tokens_seen": 7937280,
+      "step": 795
+    },
+    {
+      "epoch": 0.41205253669842906,
+      "grad_norm": 0.09825330413354653,
+      "learning_rate": 9.090329093371666e-05,
+      "loss": 0.0065,
+      "num_input_tokens_seen": 7987200,
+      "step": 800
+    },
+    {
+      "epoch": 0.41205253669842906,
+      "eval_loss": 0.006797688081860542,
+      "eval_runtime": 19.3386,
+      "eval_samples_per_second": 3.103,
+      "eval_steps_per_second": 0.776,
+      "num_input_tokens_seen": 7987200,
+      "step": 800
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 7488000,
+  "num_input_tokens_seen": 7987200,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -1361,7 +1450,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 494044521955328.0,
+  "total_flos": 526984501526528.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null