ben81828 committed
Commit a6b239e · verified · 1 Parent(s): 82a543a

Training in progress, step 1200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d69e5b496c36ad259db37344eb706b381e79be34b12a26861d7f318295dcfd58
+ oid sha256:60c80d389637b5f5cae317955ea9368ed58131fb66260b7e896c23b167e9cf7b
  size 29034840
last-checkpoint/global_step1200/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b1be5e541d7054f593eb5336f07144b4c45a392caa51e3f89f3b9a4039b25e1
+ size 43429616
last-checkpoint/global_step1200/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47da22d2e76ec4ef4c272a509f43a0f8b566ae69ed73e32b131ff13d81209f0a
+ size 43429616
last-checkpoint/global_step1200/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0d8cd6478436876aace2da6ad611707eedbe138391f26d8840dbc1e9f67a653
+ size 43429616
last-checkpoint/global_step1200/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4927440aed262ba80b9104518087387cc6a40c362c4e4c3a5b6aed1dee2afd29
+ size 43429616
last-checkpoint/global_step1200/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d06f5911185a6ebb03a50cfa88a0b397d7dcb732800b879d25a0b602da1df67
+ size 637299
last-checkpoint/global_step1200/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ddb6eccf101a9bfdd7a0e209ad8e34c5bd436c139a6cc24e86858cfb663649f
+ size 637171
last-checkpoint/global_step1200/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:63fa0ba1e22cd4330a732b53b080494fee16b561a8509fb226cb0abdd186e7ea
+ size 637171
last-checkpoint/global_step1200/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a54361c50a0229f80835738434c247594a5157c91a95390f6abdef0e295dfb9d
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step1150
+ global_step1200
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:274dc3860ee0c7f4d5348f60910a4b568498c04adfefb89f905b1c78a82c1312
+ oid sha256:a209a0c0025f9ce8e2beeba50c1f0828d5c34a2482310fcd0bf5fc24c2c67be2
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e9aa441491b9ca89e796944520fa1db332a67c0a1a920be83edd2d96d741716d
+ oid sha256:a67fb929b8c51f9b1c6ff9f11366e57e55128a1d36df85a9d37a008b49017a75
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c1ee3434533b24fb771504fa8cceb5c2ea25fe0de1641128feaceccc65afe6ed
+ oid sha256:1b9ef3b0c0978d0b611f4257c939f1c2c6f07e6227bfea6675532d285b0b64a7
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:93b4a44be1335173d2e3120bd0d1e6346f3e832d8935752c70ce1e98f017fa87
+ oid sha256:0183d14c8ed52ee533139532e9bcf7bc34ec297a064845b35741cb501d92675f
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dc7c1c90680adb84a90114a256d228473c266c4da4454cc4d39e70f828ce64ad
+ oid sha256:17a5556c8233d79e1be6279770d5e53a5fee5448790e76942f098adafb906464
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.42494550347328186,
- "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-1150",
- "epoch": 0.3397341211225997,
+ "best_metric": 0.2966395914554596,
+ "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-1200",
+ "epoch": 0.35450516986706054,
  "eval_steps": 50,
- "global_step": 1150,
+ "global_step": 1200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -2054,11 +2054,100 @@
  "eval_steps_per_second": 0.765,
  "num_input_tokens_seen": 11933632,
  "step": 1150
+ },
+ {
+ "epoch": 0.3412112259970458,
+ "grad_norm": 4.778973114307738,
+ "learning_rate": 9.60798322936489e-05,
+ "loss": 0.3716,
+ "num_input_tokens_seen": 11986496,
+ "step": 1155
+ },
+ {
+ "epoch": 0.34268833087149186,
+ "grad_norm": 8.20796735033587,
+ "learning_rate": 9.603229145032993e-05,
+ "loss": 0.4234,
+ "num_input_tokens_seen": 12039112,
+ "step": 1160
+ },
+ {
+ "epoch": 0.34416543574593794,
+ "grad_norm": 7.158508103350641,
+ "learning_rate": 9.598447597882181e-05,
+ "loss": 0.3973,
+ "num_input_tokens_seen": 12091728,
+ "step": 1165
+ },
+ {
+ "epoch": 0.345642540620384,
+ "grad_norm": 9.320131732384727,
+ "learning_rate": 9.593638616439118e-05,
+ "loss": 0.3494,
+ "num_input_tokens_seen": 12143896,
+ "step": 1170
+ },
+ {
+ "epoch": 0.34711964549483015,
+ "grad_norm": 10.150141046652656,
+ "learning_rate": 9.588802229394137e-05,
+ "loss": 0.4182,
+ "num_input_tokens_seen": 12195336,
+ "step": 1175
+ },
+ {
+ "epoch": 0.34859675036927623,
+ "grad_norm": 9.270011962927722,
+ "learning_rate": 9.583938465601075e-05,
+ "loss": 0.462,
+ "num_input_tokens_seen": 12247696,
+ "step": 1180
+ },
+ {
+ "epoch": 0.3500738552437223,
+ "grad_norm": 8.96068778293971,
+ "learning_rate": 9.5790473540771e-05,
+ "loss": 0.4451,
+ "num_input_tokens_seen": 12300040,
+ "step": 1185
+ },
+ {
+ "epoch": 0.3515509601181684,
+ "grad_norm": 24.761476817148992,
+ "learning_rate": 9.574128924002533e-05,
+ "loss": 0.4789,
+ "num_input_tokens_seen": 12351904,
+ "step": 1190
+ },
+ {
+ "epoch": 0.35302806499261447,
+ "grad_norm": 1.8519516556186366,
+ "learning_rate": 9.569183204720677e-05,
+ "loss": 0.3898,
+ "num_input_tokens_seen": 12403280,
+ "step": 1195
+ },
+ {
+ "epoch": 0.35450516986706054,
+ "grad_norm": 5.005586803143539,
+ "learning_rate": 9.564210225737647e-05,
+ "loss": 0.3296,
+ "num_input_tokens_seen": 12456040,
+ "step": 1200
+ },
+ {
+ "epoch": 0.35450516986706054,
+ "eval_loss": 0.2966395914554596,
+ "eval_runtime": 19.5244,
+ "eval_samples_per_second": 3.073,
+ "eval_steps_per_second": 0.768,
+ "num_input_tokens_seen": 12456040,
+ "step": 1200
  }
  ],
  "logging_steps": 5,
  "max_steps": 6770,
- "num_input_tokens_seen": 11933632,
+ "num_input_tokens_seen": 12456040,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -2073,7 +2162,7 @@
  "attributes": {}
  }
  },
- "total_flos": 787259637891072.0,
+ "total_flos": 821733054480384.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null