ben81828 committed
Commit e371343 · verified · 1 Parent(s): 7772d72

Training in progress, step 650, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:085286a0e6090c82fcc13eea53f92c3af07a3dd530dc3c67c22de5abfe705112
+ oid sha256:f082218d4450a6fd5cba15403298e1e103fd66cae7d3efcef2dce424d9043bc9
  size 29034840
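
Each artifact in this checkpoint is stored as a Git LFS pointer: the repository only tracks the spec version, the sha256 oid of the blob, and its byte size, while the binary itself lives in LFS storage. A minimal sketch of verifying a downloaded blob against such a pointer (the local file names are hypothetical):

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    """Read the key/value fields of a Git LFS pointer file."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def blob_matches_pointer(pointer_path: str, blob_path: str) -> bool:
    """Check that a blob's size and sha256 digest match the pointer."""
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = Path(blob_path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# Hypothetical usage against the adapter file above:
# blob_matches_pointer("adapter_model.safetensors.pointer", "adapter_model.safetensors")
```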
last-checkpoint/global_step650/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81db18808ef854cee5581ca4f17863358b719281e053752be4de4707e39e43c0
+ size 43429616
last-checkpoint/global_step650/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e857fd390a161f2feb8f5e6a6760418412e53d89c4b054f94b6a141334063763
+ size 43429616
last-checkpoint/global_step650/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fedb43a5f62ade4a23cb9ee6ed4887f51051ae484e0a31b7e386eaf51cd04ee2
+ size 43429616
last-checkpoint/global_step650/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0c36d41d30e3fabe68d4d64ac13a18e9b9378b59d3facbd4e9ec1d8d22d58c1
+ size 43429616
last-checkpoint/global_step650/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f9450a0ced61d6c33a66cdf1b2511022f0ec24416b17e1f16ed49dc98ff62dd
+ size 637299
last-checkpoint/global_step650/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d01b9093c4ff061c6453535b14beda5a9d96a531472a58effac685152d63de95
+ size 637171
last-checkpoint/global_step650/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a3bfb0ef98b44969e473f6d5c11a92b5a607c48dc2a278b5d72591611de4079
+ size 637171
last-checkpoint/global_step650/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a41c42f45b9260afbaae285e2a90de1413db2108ea931952bc69942ea6f19e1e
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step600
+ global_step650
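
The `latest` file holds the checkpoint tag that DeepSpeed resolves when resuming without an explicit tag, so bumping it from global_step600 to global_step650 is what points a resumed run at the new shard directory. The four optimizer-state and four model-state files above are the per-rank ZeRO partitions of a 4-GPU run; a sketch of consolidating them into a single fp32 state dict with DeepSpeed's stock helper, assuming the repository is cloned locally as last-checkpoint:

```python
from pathlib import Path
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

ckpt_dir = "last-checkpoint"                           # assumed local clone
tag = (Path(ckpt_dir) / "latest").read_text().strip()  # -> "global_step650"
state_dict = get_fp32_state_dict_from_zero_checkpoint(ckpt_dir, tag=tag)
print(f"consolidated {len(state_dict)} tensors from {tag}")
```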
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a81e3916b1392c4c49afb171dee5415c15f5a5a5af8749b28195fcfa0596699c
+ oid sha256:8044e4c53158c210a17648ba8f2dc2d25a25bbfc55f686015542618eb652a33e
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9a781038dd714b87b8adb1aac8dbc8217ceb607428a992133954ad522365236e
+ oid sha256:4cd85d7fa425e7888c973f1c2985ac15ca21b5e6171fe140a401c2bc75ca46ff
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9446c3db15f382a5546f13622787fc99392a5e0bc8a9ca2da1838de7ab621a37
+ oid sha256:d7915667371a58f1598639e0d1c20a0c59c783c14580cd040a6631eb4ea2311e
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1f11e7a6b3faa884fc23044e3772ff9dd72c257f02e121665061e2a03d518bd9
+ oid sha256:35dd78929ad7f0fbf37fdb1284e8edf0424350f6e6ce1cd5a3ee78979af3d3cb
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b76b388bede074656df32b92902ac42b965557bfee0c930366af07d8382b1b4a
+ oid sha256:8cd94ecf5c982ee0e060d3e07a575ce03dc3b0f289b5e32a1f65d3b6366a8a0e
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
    "best_metric": 0.0033526704646646976,
    "best_model_checkpoint": "saves/CADICA_qwenvl_direction_scale4/lora/sft/checkpoint-550",
-   "epoch": 0.3090394025238218,
+   "epoch": 0.3347926860674736,
    "eval_steps": 50,
-   "global_step": 600,
+   "global_step": 650,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -1075,11 +1075,100 @@
        "eval_steps_per_second": 0.795,
        "num_input_tokens_seen": 5990400,
        "step": 600
+     },
+     {
+       "epoch": 0.311614730878187,
+       "grad_norm": 0.06328082378687645,
+       "learning_rate": 9.55911532374151e-05,
+       "loss": 0.0207,
+       "num_input_tokens_seen": 6040320,
+       "step": 605
+     },
+     {
+       "epoch": 0.31419005923255217,
+       "grad_norm": 1.846961607001629,
+       "learning_rate": 9.549077815930636e-05,
+       "loss": 0.023,
+       "num_input_tokens_seen": 6090240,
+       "step": 610
+     },
+     {
+       "epoch": 0.31676538758691736,
+       "grad_norm": 0.07122251919235507,
+       "learning_rate": 9.538932721758474e-05,
+       "loss": 0.017,
+       "num_input_tokens_seen": 6140160,
+       "step": 615
+     },
+     {
+       "epoch": 0.3193407159412825,
+       "grad_norm": 1.9564639611342651,
+       "learning_rate": 9.528680281157999e-05,
+       "loss": 0.009,
+       "num_input_tokens_seen": 6190080,
+       "step": 620
+     },
+     {
+       "epoch": 0.3219160442956477,
+       "grad_norm": 0.43628623054662674,
+       "learning_rate": 9.518320736600943e-05,
+       "loss": 0.0421,
+       "num_input_tokens_seen": 6240000,
+       "step": 625
+     },
+     {
+       "epoch": 0.3244913726500129,
+       "grad_norm": 0.7626652394047067,
+       "learning_rate": 9.507854333092063e-05,
+       "loss": 0.0324,
+       "num_input_tokens_seen": 6289920,
+       "step": 630
+     },
+     {
+       "epoch": 0.32706670100437807,
+       "grad_norm": 0.5823990007842583,
+       "learning_rate": 9.497281318163346e-05,
+       "loss": 0.0139,
+       "num_input_tokens_seen": 6339840,
+       "step": 635
+     },
+     {
+       "epoch": 0.32964202935874326,
+       "grad_norm": 1.6726620150276597,
+       "learning_rate": 9.486601941868154e-05,
+       "loss": 0.0375,
+       "num_input_tokens_seen": 6389760,
+       "step": 640
+     },
+     {
+       "epoch": 0.3322173577131084,
+       "grad_norm": 0.08456122780891835,
+       "learning_rate": 9.475816456775313e-05,
+       "loss": 0.0618,
+       "num_input_tokens_seen": 6439680,
+       "step": 645
+     },
+     {
+       "epoch": 0.3347926860674736,
+       "grad_norm": 0.3287256358007373,
+       "learning_rate": 9.464925117963133e-05,
+       "loss": 0.0327,
+       "num_input_tokens_seen": 6489600,
+       "step": 650
+     },
+     {
+       "epoch": 0.3347926860674736,
+       "eval_loss": 0.02095886506140232,
+       "eval_runtime": 19.1993,
+       "eval_samples_per_second": 3.125,
+       "eval_steps_per_second": 0.781,
+       "num_input_tokens_seen": 6489600,
+       "step": 650
      }
    ],
    "logging_steps": 5,
    "max_steps": 3400,
-   "num_input_tokens_seen": 5990400,
+   "num_input_tokens_seen": 6489600,
    "num_train_epochs": 2,
    "save_steps": 50,
    "stateful_callbacks": {
@@ -1094,7 +1183,7 @@
        "attributes": {}
      }
    },
-   "total_flos": 395224583241728.0,
+   "total_flos": 428164562812928.0,
    "train_batch_size": 1,
    "trial_name": null,
    "trial_params": null