ben81828 committed
Commit 3db7ccb · verified · 1 Parent(s): fc15aea

Training in progress, step 800, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:87a05b954f6ad396e106053a5bf73274eb4671d1ca4b7518421076c40296fc81
+ oid sha256:6469efd0e0f557a18cd35ba4f70eb77371da998a85d2c741e6c6de160fc4ff06
  size 29034840
last-checkpoint/global_step800/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2174a443dad6d9006acfe79d324978c9f07b5ea257aca9d95fd75624fa54cb8
+ size 43429616
last-checkpoint/global_step800/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:426fa667d71ccf8b60e8ade9d81b9ad760a7ce15cee036b23b87adbc4a07f4bf
+ size 43429616
last-checkpoint/global_step800/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6dfc313755621ef1469887426d377a58d160443a75db41587783dcdc3dc396d1
+ size 43429616
last-checkpoint/global_step800/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e060e6d58d9af18772eb1d31b043460d98bb11979e5a6b98631c1444a78c439e
+ size 43429616
last-checkpoint/global_step800/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33467f0cec03b4779dee333111c3bd03a22372ddf3a2a795922f0ea80db2d9e0
+ size 637299
last-checkpoint/global_step800/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cff6501f8b06265046ed0183476dba0c748bb06dda09c2b6e873c247c05ea43d
+ size 637171
last-checkpoint/global_step800/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0768848701c35a91de3ad97687565841b9f4771f8101fdde7bedbe1ebcbc9f3
+ size 637171
last-checkpoint/global_step800/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4416b108bf4bd22e915b38c44cb62e05b07e52df22336b642b6cf6d7761cbad4
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step750
+ global_step800
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:66b4ef73f3603a1b91082ee108fa8299ebe45fb3cdeec7d0bdca1982af5bf07d
+ oid sha256:b4f003069486a57c6ac033f30cf4c4213eb6b7d659bab68a5a50fdb8da7c4118
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:882eccb2a272cf97cd645050bd077c971e48e78584f717a1b1cc9b5f1c9326dc
+ oid sha256:a016ef89b4392d083b2c15a7cf06a39bc61a759f648cf6dc03f1c32b89a526aa
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1626bac54f5806a604b7efdd749c5b65d63bbb40fc55c3744aae6130aa24f3de
+ oid sha256:9b56fe0893036dc052d18d90feba4328b90ea71561942150b07406ac3d7a700e
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:40cf83ff997228172cf0b991f9d5209728ccf2f0a75841db5e31e647779a1ad2
+ oid sha256:c0c203d12c2c308dab785ed672c9ca27fb6a2f72acd1e1552d1516c7b0006013
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fb90ddaf9f2555f7a751dcba5b295eed1e25f610d0357becbe825d3cf6fda52d
+ oid sha256:8c7fcb50f64a1a582f4b708119f6541895781dfbde796583be012e2904ba248d
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.4665524661540985,
  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-750",
- "epoch": 0.22156573116691286,
+ "epoch": 0.2363367799113737,
  "eval_steps": 50,
- "global_step": 750,
+ "global_step": 800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1342,11 +1342,100 @@
  "eval_steps_per_second": 0.783,
  "num_input_tokens_seen": 7779872,
  "step": 750
+ },
+ {
+ "epoch": 0.22304283604135894,
+ "grad_norm": 7.392056712218606,
+ "learning_rate": 9.897109742122721e-05,
+ "loss": 0.5248,
+ "num_input_tokens_seen": 7832168,
+ "step": 755
+ },
+ {
+ "epoch": 0.22451994091580502,
+ "grad_norm": 9.230824229530686,
+ "learning_rate": 9.894630330827686e-05,
+ "loss": 0.5017,
+ "num_input_tokens_seen": 7884040,
+ "step": 760
+ },
+ {
+ "epoch": 0.2259970457902511,
+ "grad_norm": 11.203609848309013,
+ "learning_rate": 9.892121718217182e-05,
+ "loss": 0.4896,
+ "num_input_tokens_seen": 7935528,
+ "step": 765
+ },
+ {
+ "epoch": 0.2274741506646972,
+ "grad_norm": 30.185572869944284,
+ "learning_rate": 9.88958391925757e-05,
+ "loss": 0.5125,
+ "num_input_tokens_seen": 7987760,
+ "step": 770
+ },
+ {
+ "epoch": 0.22895125553914328,
+ "grad_norm": 18.649424971543322,
+ "learning_rate": 9.887016949089333e-05,
+ "loss": 0.5615,
+ "num_input_tokens_seen": 8039400,
+ "step": 775
+ },
+ {
+ "epoch": 0.23042836041358936,
+ "grad_norm": 5.360845077873566,
+ "learning_rate": 9.884420823026989e-05,
+ "loss": 0.494,
+ "num_input_tokens_seen": 8092440,
+ "step": 780
+ },
+ {
+ "epoch": 0.23190546528803546,
+ "grad_norm": 10.101391912363345,
+ "learning_rate": 9.881795556558999e-05,
+ "loss": 0.5122,
+ "num_input_tokens_seen": 8145040,
+ "step": 785
+ },
+ {
+ "epoch": 0.23338257016248154,
+ "grad_norm": 5.90491429019666,
+ "learning_rate": 9.879141165347678e-05,
+ "loss": 0.4925,
+ "num_input_tokens_seen": 8196904,
+ "step": 790
+ },
+ {
+ "epoch": 0.23485967503692762,
+ "grad_norm": 6.228283676778458,
+ "learning_rate": 9.876457665229097e-05,
+ "loss": 0.4752,
+ "num_input_tokens_seen": 8249232,
+ "step": 795
+ },
+ {
+ "epoch": 0.2363367799113737,
+ "grad_norm": 8.496099871334396,
+ "learning_rate": 9.87374507221299e-05,
+ "loss": 0.4239,
+ "num_input_tokens_seen": 8301976,
+ "step": 800
+ },
+ {
+ "epoch": 0.2363367799113737,
+ "eval_loss": 0.48219749331474304,
+ "eval_runtime": 19.0825,
+ "eval_samples_per_second": 3.144,
+ "eval_steps_per_second": 0.786,
+ "num_input_tokens_seen": 8301976,
+ "step": 800
  }
  ],
  "logging_steps": 5,
  "max_steps": 6770,
- "num_input_tokens_seen": 7779872,
+ "num_input_tokens_seen": 8301976,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -1361,7 +1450,7 @@
  "attributes": {}
  }
  },
- "total_flos": 513213288415232.0,
+ "total_flos": 547680066994176.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null