ben81828 committed
Commit 9a18d68 · verified · 1 Parent(s): 09b34f6

Training in progress, step 2300, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fef821b7ed03f4855afe73a282dfe1eaf891fe7aec11dbae66d77bf1e75802cb
+ oid sha256:81cfb0ee5389022e6fe86935c49629e5dcdacdc2bddc774ca113221a60166576
  size 18516456
last-checkpoint/global_step2299/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82c5963b53be7bf4ac988682be435545c879b80467ee44486db35514a3dd16f8
+ size 27700976
last-checkpoint/global_step2299/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:44aa3615153bf253d897162ea20869121b8ae8927d27c350e7a308f12dc94a1a
+ size 27700976
last-checkpoint/global_step2299/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6224f246a16fea820cc08f89f1f4e6de627075ef496f2a9c1e3b0e8061dec8b8
+ size 27700976
last-checkpoint/global_step2299/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e229d62f02874b000b920fdd497f10658e226f07fa6e27065139834506b0a58
+ size 27700976
last-checkpoint/global_step2299/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19668b184fd53a9ee5458aa56ddc85a6ca6e8304d2bff2460c472b9f14503bb3
+ size 411571
last-checkpoint/global_step2299/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b400e32c99c430b7030ac0d8014b994f40222986d788094945a538a49140891
+ size 411507
last-checkpoint/global_step2299/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21ee002c79d745c31b162ae6ceed53b333aa893fdbbdc34482e6406dbdd3c64f
+ size 411507
last-checkpoint/global_step2299/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5a1e8275ab5d8b357ecea79a83241cf6e62a645d321f2a23a6ae5efec49b3bc
+ size 411507
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step2249
+ global_step2299
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1d25cbcbbaa0866ea9c7365cb49b84e805db119693e615f5a1898a6ebfe997e8
+ oid sha256:f12bf3da75454e5aae4644f2a1d46fdf90f68e680dbf5bdaa86861f825d32d80
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a883389afac12125c2c6bf62631b7de0220fdb0020d24cd0c6e8f8858dd3b362
+ oid sha256:2faef1b8798e7516fd96ee7b3363866a8f97ca2d0ec5a8dd27bbfe70b0c6a733
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:577d49de6d60035e159d9ebb1e6eabef79a55787b14ecea93a6a93c242661779
+ oid sha256:7f35f6d27fbd414dd4285d91816b37fb6b97ff10fbba4c074d56ad2a7f723033
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b5f2dd1c21e06806a9ce39eeab45734dfb8a62b829f91a86d1f65f13102d6242
+ oid sha256:ea5c64dfc7e3b2729065483dfef8e4bb0af0d9bae32df888d258ee3c2859d676
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6909ad505e808e7099dbcdd8062e5535575cbfa3b4d3a7b7d3390e6a93ed3b49
+ oid sha256:25aeb77fef385fcfd5658b6aaea11fb8aa276239b7ba19a4f3504b86f45fbcc9
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.6319106221199036,
  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1600",
- "epoch": 1.158640226628895,
+ "epoch": 1.184393510172547,
  "eval_steps": 50,
- "global_step": 2250,
+ "global_step": 2300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -4012,11 +4012,100 @@
  "eval_steps_per_second": 0.94,
  "num_input_tokens_seen": 26310336,
  "step": 2250
+ },
+ {
+ "epoch": 1.1612155549832603,
+ "grad_norm": 6.312367706992343,
+ "learning_rate": 2.7931010410023518e-05,
+ "loss": 0.3547,
+ "num_input_tokens_seen": 26368840,
+ "step": 2255
+ },
+ {
+ "epoch": 1.1637908833376256,
+ "grad_norm": 6.429493717694784,
+ "learning_rate": 2.771308221117309e-05,
+ "loss": 0.3125,
+ "num_input_tokens_seen": 26427280,
+ "step": 2260
+ },
+ {
+ "epoch": 1.1663662116919906,
+ "grad_norm": 6.993677707266103,
+ "learning_rate": 2.749568110121545e-05,
+ "loss": 0.3521,
+ "num_input_tokens_seen": 26485760,
+ "step": 2265
+ },
+ {
+ "epoch": 1.1689415400463559,
+ "grad_norm": 5.03743116566882,
+ "learning_rate": 2.7278812221718924e-05,
+ "loss": 0.281,
+ "num_input_tokens_seen": 26544224,
+ "step": 2270
+ },
+ {
+ "epoch": 1.1715168684007211,
+ "grad_norm": 5.828198718501714,
+ "learning_rate": 2.7062480701664488e-05,
+ "loss": 0.3653,
+ "num_input_tokens_seen": 26602712,
+ "step": 2275
+ },
+ {
+ "epoch": 1.1740921967550864,
+ "grad_norm": 6.1247491578050655,
+ "learning_rate": 2.6846691657324473e-05,
+ "loss": 0.3964,
+ "num_input_tokens_seen": 26661160,
+ "step": 2280
+ },
+ {
+ "epoch": 1.1766675251094514,
+ "grad_norm": 6.231155247277189,
+ "learning_rate": 2.663145019214163e-05,
+ "loss": 0.3119,
+ "num_input_tokens_seen": 26719648,
+ "step": 2285
+ },
+ {
+ "epoch": 1.1792428534638166,
+ "grad_norm": 6.501604840456734,
+ "learning_rate": 2.6416761396608362e-05,
+ "loss": 0.3832,
+ "num_input_tokens_seen": 26778112,
+ "step": 2290
+ },
+ {
+ "epoch": 1.1818181818181819,
+ "grad_norm": 5.377003761278013,
+ "learning_rate": 2.6202630348146324e-05,
+ "loss": 0.3277,
+ "num_input_tokens_seen": 26836592,
+ "step": 2295
+ },
+ {
+ "epoch": 1.184393510172547,
+ "grad_norm": 4.826044073542379,
+ "learning_rate": 2.598906211098643e-05,
+ "loss": 0.3877,
+ "num_input_tokens_seen": 26895096,
+ "step": 2300
+ },
+ {
+ "epoch": 1.184393510172547,
+ "eval_loss": 0.727741539478302,
+ "eval_runtime": 15.9289,
+ "eval_samples_per_second": 3.767,
+ "eval_steps_per_second": 0.942,
+ "num_input_tokens_seen": 26895096,
+ "step": 2300
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 26310336,
+ "num_input_tokens_seen": 26895096,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -4031,7 +4120,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1477446954123264.0,
+ "total_flos": 1510284389515264.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null