ben81828 committed
Commit 4302df0 · verified · 1 Parent(s): 9b4e4f8

Training in progress, step 1800, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:02f092c8551b39f72b2480ed050635fbcee05ba27bc1999c55e842ece4cdbe8b
+oid sha256:703d6c7e98f4bf68731cde74c5b20cc8e4bb82cbcfea9af1822815e13d9fd381
 size 29034840
last-checkpoint/global_step1800/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:007e09fbadb8592f88e97176443687fca48eab7f5db9c1bbf9c938c3ed3192a6
+size 43429616
last-checkpoint/global_step1800/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4cdfbde74aa32ffe4f8d135bf570e392a9f9a1def80871981b9b6ed0bda1af8f
+size 43429616
last-checkpoint/global_step1800/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b78d4331a9f341772b0453cd7d7c4821bd7f356c524cb0d19deb86dc12f825a
+size 43429616
last-checkpoint/global_step1800/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12c2ea98bf8d297d2d3a0840da8aa8b09b47b43d9f17d8af91b84f8f4ab50698
+size 43429616
last-checkpoint/global_step1800/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b04f4e9069181e25525c87fbf11d999a2d4abd07f3eb50097501d9dd0542fec
+size 637299
last-checkpoint/global_step1800/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8237e9df6dbc2214b6b3530b5a9dd66ba4d4334b0ef1e1f4ab5b6012172e97a
+size 637171
last-checkpoint/global_step1800/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a3e2db29e2e998fbb06c7913097998b93745165aef55c2291b5a721e12abbfa
+size 637171
last-checkpoint/global_step1800/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a59280c3fd7f89f5dcc1f73643489f2258fbd2902aeb5a6835cc2a6d4eadbd71
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step1750
+global_step1800
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:222e5f04f66dfcca4efb2d648f5a480d56c2a07755d7a1bae8232d01d4b479ce
+oid sha256:4d76f093328c54f2b94a10b8b50dc92fc99ceff9e3949d050a70042526f1d0eb
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:150a9cfa07bd33135b1b8b22033907e44137689de662dda0a482f3af84c5a1ff
+oid sha256:54b172dd00cba9f005761d85fb0804f751caf6e1cc8294d1873354a890cb9909
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c0c0d7619e94c90efa0c89a1f208db53b48c726519761710da6fbc31a80651d8
+oid sha256:ec6adef733bd7630aa48eff1a6edaabc275d67293dcb0b7a64d71451405d489d
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aa66a4b2f749b1ec7e8868668dc670ff3f6d8df765a5249122f980ae5aae6a54
+oid sha256:6ce1c4ba1932c0f698bef90e0e4e74ded1e0db5fc35282a0815899b8be759e67
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:46be255d8fc53b179b832135261a9566900d19a45a6e3b7aa116fe2096be8b96
+oid sha256:df430b942be14b68acaf59a6a16e4c1e563f436ec4bae0ac022a9110d0fbea5b
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.28714123368263245,
   "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-1550",
-  "epoch": 0.51698670605613,
+  "epoch": 0.5317577548005908,
   "eval_steps": 50,
-  "global_step": 1750,
+  "global_step": 1800,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -3122,11 +3122,100 @@
       "eval_steps_per_second": 0.772,
       "num_input_tokens_seen": 18169344,
       "step": 1750
+    },
+    {
+      "epoch": 0.518463810930576,
+      "grad_norm": 9.533140259257635,
+      "learning_rate": 8.850728393614902e-05,
+      "loss": 0.2966,
+      "num_input_tokens_seen": 18221144,
+      "step": 1755
+    },
+    {
+      "epoch": 0.5199409158050221,
+      "grad_norm": 1.1902233180287396,
+      "learning_rate": 8.842926829362446e-05,
+      "loss": 0.3101,
+      "num_input_tokens_seen": 18272752,
+      "step": 1760
+    },
+    {
+      "epoch": 0.5214180206794683,
+      "grad_norm": 5.4464127921059635,
+      "learning_rate": 8.835102338247064e-05,
+      "loss": 0.2545,
+      "num_input_tokens_seen": 18325888,
+      "step": 1765
+    },
+    {
+      "epoch": 0.5228951255539144,
+      "grad_norm": 4.908248580861331,
+      "learning_rate": 8.827254966949593e-05,
+      "loss": 0.3223,
+      "num_input_tokens_seen": 18378016,
+      "step": 1770
+    },
+    {
+      "epoch": 0.5243722304283605,
+      "grad_norm": 8.878082941549529,
+      "learning_rate": 8.819384762287373e-05,
+      "loss": 0.2714,
+      "num_input_tokens_seen": 18431240,
+      "step": 1775
+    },
+    {
+      "epoch": 0.5258493353028065,
+      "grad_norm": 1.3922104834090385,
+      "learning_rate": 8.811491771213964e-05,
+      "loss": 0.3438,
+      "num_input_tokens_seen": 18482832,
+      "step": 1780
+    },
+    {
+      "epoch": 0.5273264401772526,
+      "grad_norm": 6.15513486850916,
+      "learning_rate": 8.803576040818873e-05,
+      "loss": 0.2324,
+      "num_input_tokens_seen": 18534992,
+      "step": 1785
+    },
+    {
+      "epoch": 0.5288035450516987,
+      "grad_norm": 5.349889126448278,
+      "learning_rate": 8.795637618327269e-05,
+      "loss": 0.2259,
+      "num_input_tokens_seen": 18587752,
+      "step": 1790
+    },
+    {
+      "epoch": 0.5302806499261448,
+      "grad_norm": 11.727285406275136,
+      "learning_rate": 8.7876765510997e-05,
+      "loss": 0.2468,
+      "num_input_tokens_seen": 18640440,
+      "step": 1795
+    },
+    {
+      "epoch": 0.5317577548005908,
+      "grad_norm": 2.562809564221474,
+      "learning_rate": 8.779692886631812e-05,
+      "loss": 0.3127,
+      "num_input_tokens_seen": 18691928,
+      "step": 1800
+    },
+    {
+      "epoch": 0.5317577548005908,
+      "eval_loss": 0.3499237596988678,
+      "eval_runtime": 19.3376,
+      "eval_samples_per_second": 3.103,
+      "eval_steps_per_second": 0.776,
+      "num_input_tokens_seen": 18691928,
+      "step": 1800
     }
   ],
   "logging_steps": 5,
   "max_steps": 6770,
-  "num_input_tokens_seen": 18169344,
+  "num_input_tokens_seen": 18691928,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -3141,7 +3230,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1198665362571264.0,
+  "total_flos": 1233113797361664.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null