ben81828 committed on
Commit b8cf8cd · verified · 1 Parent(s): 301b05a

Training in progress, step 2900, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ab786f741c7a478f92cbba3fdd1ddd3a418946d605848f775e0f6c469c96daf4
+oid sha256:f649bc97ca842140ab28c612e0e561ebbde8139ce799f23e401fad5ec0bf673a
 size 29034840
last-checkpoint/global_step2900/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c680026b961d2af4d8531a1009d4325d4aee0011e5a4841d0f7be05d66ade814
+size 43429616
last-checkpoint/global_step2900/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42c3a0056f516a18f586cb0fd96b075cf6f88223a9519e3d05c1825c42854a5a
+size 43429616
last-checkpoint/global_step2900/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7fb00ae7f41df9fdb5bcfca5ae2a0ee34109a9526870c75e600273241aa2e9b
+size 43429616
last-checkpoint/global_step2900/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19fe721ba700619098cae1c23138a52aafaf176ecf1f46e7e63f24793a559f24
+size 43429616
last-checkpoint/global_step2900/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4450ed4bf964babed675fcd2ae59c712e01b319719ea4623b508e9e8bbfc354d
+size 637299
last-checkpoint/global_step2900/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac843c84f2c600b4501ddce6691d3bee44a437550b44b143970c70aa46ede7b9
+size 637171
last-checkpoint/global_step2900/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8cf16b94da5090df4fc41c6cb44c197f5c1e0eeef0b24cce4b8ff38d26ec7d36
+size 637171
last-checkpoint/global_step2900/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42703655a551b8b9d2fb71bb55d8aec753128d736359e443ff46543063b42daf
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step2850
+global_step2900
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ff8dba2341c0517760edfde50521977f02a5bd982ffd3bc03de6109439c4f478
+oid sha256:ce92cea831a04716b4b472f1dad1cc986b2021dee9aac057217f5d455b27ec42
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d2bf831df9fbade9ac2a8db79798bc2a7b1afb85a78a6e463ec7a7db4acc0f8e
+oid sha256:3cddb73bbdf0f6f6a2c3182d70f7ad5d587353b164c08dd4f383b940d6b61e4e
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8602ff0a0fa366d46b61c0ef2b23ce468387898cf2bc1027e5450de73ddf647f
+oid sha256:b24b508e466beb446d37377d2a04757d3bc2b4230de3ac56b25a65d7753a74c1
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4bb51d675cf23603b1b765cd645f53d6b66ddb104d56d48674e9c798e086f696
+oid sha256:b4c6a18a7de8b25b21673ba2ff7efbaaae00ec8c453c7975b467c1df87b87022
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8784a806a517b57d5b84ca561594528563d0fdcdc4d0513f5b313f6cc716b68e
+oid sha256:6207ec0a6d3415bd88090ab549215521abec8051b511fdc1a1cfc0484ab44197
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.1869634985923767,
   "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-2350",
-  "epoch": 0.8419497784342689,
+  "epoch": 0.8567208271787297,
   "eval_steps": 50,
-  "global_step": 2850,
+  "global_step": 2900,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -5080,11 +5080,100 @@
       "eval_steps_per_second": 0.77,
       "num_input_tokens_seen": 29600536,
       "step": 2850
+    },
+    {
+      "epoch": 0.843426883308715,
+      "grad_norm": 1.4547945162774674,
+      "learning_rate": 6.675498736513036e-05,
+      "loss": 0.2163,
+      "num_input_tokens_seen": 29652440,
+      "step": 2855
+    },
+    {
+      "epoch": 0.844903988183161,
+      "grad_norm": 1.3348915008279034,
+      "learning_rate": 6.663987164217236e-05,
+      "loss": 0.2589,
+      "num_input_tokens_seen": 29704376,
+      "step": 2860
+    },
+    {
+      "epoch": 0.8463810930576071,
+      "grad_norm": 2.2889038673998603,
+      "learning_rate": 6.652465664590703e-05,
+      "loss": 0.2325,
+      "num_input_tokens_seen": 29756504,
+      "step": 2865
+    },
+    {
+      "epoch": 0.8478581979320532,
+      "grad_norm": 1.3144459851000174,
+      "learning_rate": 6.640934306370586e-05,
+      "loss": 0.242,
+      "num_input_tokens_seen": 29807328,
+      "step": 2870
+    },
+    {
+      "epoch": 0.8493353028064993,
+      "grad_norm": 1.2238000417554058,
+      "learning_rate": 6.629393158352854e-05,
+      "loss": 0.2169,
+      "num_input_tokens_seen": 29859208,
+      "step": 2875
+    },
+    {
+      "epoch": 0.8508124076809453,
+      "grad_norm": 1.5853683051276755,
+      "learning_rate": 6.61784228939188e-05,
+      "loss": 0.2335,
+      "num_input_tokens_seen": 29911128,
+      "step": 2880
+    },
+    {
+      "epoch": 0.8522895125553914,
+      "grad_norm": 0.8463095817606877,
+      "learning_rate": 6.606281768400032e-05,
+      "loss": 0.1913,
+      "num_input_tokens_seen": 29962384,
+      "step": 2885
+    },
+    {
+      "epoch": 0.8537666174298375,
+      "grad_norm": 1.6841064365294203,
+      "learning_rate": 6.594711664347264e-05,
+      "loss": 0.2425,
+      "num_input_tokens_seen": 30013664,
+      "step": 2890
+    },
+    {
+      "epoch": 0.8552437223042836,
+      "grad_norm": 1.1559578384960632,
+      "learning_rate": 6.5831320462607e-05,
+      "loss": 0.2312,
+      "num_input_tokens_seen": 30066016,
+      "step": 2895
+    },
+    {
+      "epoch": 0.8567208271787297,
+      "grad_norm": 0.9327537302615286,
+      "learning_rate": 6.571542983224223e-05,
+      "loss": 0.2029,
+      "num_input_tokens_seen": 30118072,
+      "step": 2900
+    },
+    {
+      "epoch": 0.8567208271787297,
+      "eval_loss": 0.39434579014778137,
+      "eval_runtime": 19.1253,
+      "eval_samples_per_second": 3.137,
+      "eval_steps_per_second": 0.784,
+      "num_input_tokens_seen": 30118072,
+      "step": 2900
     }
   ],
   "logging_steps": 5,
   "max_steps": 6770,
-  "num_input_tokens_seen": 29600536,
+  "num_input_tokens_seen": 30118072,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -5099,7 +5188,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1952800415416320.0,
+  "total_flos": 1986962888589312.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null