ben81828 committed
Commit 7d60ed9 · verified · 1 Parent(s): 37fdae2

Training in progress, step 1700, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -19,7 +19,7 @@
   "r": 8,
   "rank_pattern": {},
   "revision": null,
-  "target_modules": "^(?!.*patch_embed).*(?:fc1|proj|down_proj|fc2|qkv|o_proj|q_proj|k_proj|up_proj|gate_proj|v_proj).*",
+  "target_modules": "^(?!.*patch_embed).*(?:q_proj|qkv|gate_proj|fc1|up_proj|k_proj|o_proj|fc2|down_proj|v_proj|proj).*",
   "task_type": "CAUSAL_LM",
   "use_dora": false,
   "use_rslora": false
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1a1114471ad61d66a88788a2005b7be221cdbe9e2fd3f43e9a2185e89b6fad6f
+ oid sha256:2509fd541c293692844871c3c3ef3462376501768f133b9acc8bdb3a34cf0369
  size 29034840
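Like every binary in this commit, `adapter_model.safetensors` is tracked as a Git LFS pointer: the repository stores only the SHA-256 `oid` and the `size`, while the ~29 MB payload lives in LFS storage. A minimal sketch for checking a locally downloaded file against its pointer (standard library only; the path assumes the repository layout above):

```python
# Minimal sketch: verify a downloaded file against its Git LFS pointer values.
import hashlib
from pathlib import Path

def lfs_digest(path: str) -> tuple[str, int]:
    """Return (sha256 hex digest, size in bytes) for comparison with the LFS pointer."""
    data = Path(path).read_bytes()
    return hashlib.sha256(data).hexdigest(), len(data)

digest, size = lfs_digest("last-checkpoint/adapter_model.safetensors")
assert size == 29034840
assert digest == "2509fd541c293692844871c3c3ef3462376501768f133b9acc8bdb3a34cf0369"
```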
last-checkpoint/global_step1700/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8885e80593f59dd8f41d83ce9a151af8c387d0ebcc84e3ab5c45521cef48e024
+ size 43429616
last-checkpoint/global_step1700/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac23743eab6a8a26c5e4e34bbd45e4d7effe85489304d1cccfe59cd520556cbf
+ size 43429616
last-checkpoint/global_step1700/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06a9a24a9a023713b68fd2f7012da45ebc0ba42ebe90a145a3f42eab189a375f
+ size 43429616
last-checkpoint/global_step1700/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e5844c733849dbecf263fea3ef642e6699dbd3f922b6e449dfad68db5d01fd0
+ size 43429616
last-checkpoint/global_step1700/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06ba06283edb02eda2732a8265d8207d60eb1fef7f9e3ae5c1ea8d90c17b0715
+ size 637299
last-checkpoint/global_step1700/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da84df26158916db26311a33b5233d31790e0ff29bbd78fcce9a0b80073a2f46
+ size 637171
last-checkpoint/global_step1700/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0650a802205c7a65d67b752891c3c2aa99e1d36f1b08f1727af28cb5064e46fe
+ size 637171
last-checkpoint/global_step1700/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db65b2203742cda52b00e5cd5927c8817de12627c99794dbca6286f744f71a1d
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step1650
+ global_step1700
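The new `global_step1700/` directory holds the DeepSpeed ZeRO shards for this step: one bf16 optimizer-state file and one model-state file per data-parallel rank (ranks 0-3), and `last-checkpoint/latest` now names `global_step1700` as the tag to resume from. The LoRA weights themselves are already consolidated in `adapter_model.safetensors`, but if a merged fp32 state dict of the trained parameters were needed, DeepSpeed provides a utility for exactly this checkpoint layout. A minimal sketch, assuming the `deepspeed` package is installed and the checkpoint has been downloaded locally:

```python
# Minimal sketch: consolidate the per-rank ZeRO shards into one fp32 state dict
# (assumes the `deepspeed` package; paths follow this repository's layout).
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "last-checkpoint",      # directory containing `latest` and `global_step1700/`
    tag="global_step1700",  # matches the contents of last-checkpoint/latest
)
print(len(state_dict), "consolidated parameter tensors")
```

DeepSpeed checkpoints saved this way typically also include a standalone `zero_to_fp32.py` script that wraps the same utility from the command line.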
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a90384755f5b036b42b1a465b39dbf24a925a02c04294f9d684bc1de7f4db1e5
+ oid sha256:9e7c17922709137dd2f358be87cc431f7959a56821e2d051582e81679e2d388e
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7621b41e55056218f97d5b32ae116de3304a677b9f27b6a62170d83a2bbff176
+ oid sha256:eaf0ec3cc431efdc1cb595b27e74fc020021d53f5c11850d7490a79bf42b71d6
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:997e9debadfd125b5c8b66ee6dd79ced3d40d353ff9250475f3814fd950012a6
+ oid sha256:148fd0a663c121acf74d815964bc0d08d07065e8503b03adfd967fdaf63a6abc
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0f0256032419959580948d742425f66782bc8eb029126a091669a42c6ee0eba4
+ oid sha256:56589f6c34527656fe0dc752874cfc1460efbb5c0c3f874c4cd09d6415dd16c1
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:40442cc35519e6454a07ba94c151e6fd6557a2a4ddd9970fc5771865c93790aa
+ oid sha256:f3fef512af59d169b441ed68987893abefdd92d103d53116c260b7a374ebbd3c
  size 1064
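`rng_state_0.pth` through `rng_state_3.pth` store the per-rank random-number-generator states and `scheduler.pt` stores the learning-rate scheduler state; together with `trainer_state.json` below, these are what let training resume at step 1700 without replaying data order or the LR schedule. A minimal sketch for inspecting them locally (assumes PyTorch; `weights_only=False` is needed because these files contain plain Python objects, so only load checkpoints you trust):

```python
# Minimal sketch: peek at the resume state saved alongside the adapter.
import torch

scheduler_state = torch.load(
    "last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False
)
rng_state_rank0 = torch.load(
    "last-checkpoint/rng_state_0.pth", map_location="cpu", weights_only=False
)

print(sorted(scheduler_state.keys()))  # LR scheduler state dict
print(sorted(rng_state_rank0.keys()))  # per-rank RNG states (CPU, CUDA, etc.)
```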
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.28714123368263245,
  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-1550",
- "epoch": 0.4874446085672083,
+ "epoch": 0.5022156573116692,
  "eval_steps": 50,
- "global_step": 1650,
+ "global_step": 1700,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -2944,11 +2944,100 @@
  "eval_steps_per_second": 0.77,
  "num_input_tokens_seen": 17130896,
  "step": 1650
+ },
+ {
+ "epoch": 0.48892171344165436,
+ "grad_norm": 0.9908097045731693,
+ "learning_rate": 9.00187436942368e-05,
+ "loss": 0.2956,
+ "num_input_tokens_seen": 17182896,
+ "step": 1655
+ },
+ {
+ "epoch": 0.49039881831610044,
+ "grad_norm": 2.7463420963023415,
+ "learning_rate": 8.994540932752167e-05,
+ "loss": 0.3027,
+ "num_input_tokens_seen": 17235552,
+ "step": 1660
+ },
+ {
+ "epoch": 0.4918759231905465,
+ "grad_norm": 12.1024464355288,
+ "learning_rate": 8.987183664689511e-05,
+ "loss": 0.3295,
+ "num_input_tokens_seen": 17286816,
+ "step": 1665
+ },
+ {
+ "epoch": 0.4933530280649926,
+ "grad_norm": 1.7833060218575738,
+ "learning_rate": 8.9798026091291e-05,
+ "loss": 0.3201,
+ "num_input_tokens_seen": 17339072,
+ "step": 1670
+ },
+ {
+ "epoch": 0.4948301329394387,
+ "grad_norm": 1.7534258589471414,
+ "learning_rate": 8.972397810106235e-05,
+ "loss": 0.3044,
+ "num_input_tokens_seen": 17391288,
+ "step": 1675
+ },
+ {
+ "epoch": 0.4963072378138848,
+ "grad_norm": 3.562194425832391,
+ "learning_rate": 8.964969311797871e-05,
+ "loss": 0.2781,
+ "num_input_tokens_seen": 17443456,
+ "step": 1680
+ },
+ {
+ "epoch": 0.4977843426883309,
+ "grad_norm": 3.1653143619735484,
+ "learning_rate": 8.957517158522359e-05,
+ "loss": 0.423,
+ "num_input_tokens_seen": 17494832,
+ "step": 1685
+ },
+ {
+ "epoch": 0.49926144756277696,
+ "grad_norm": 9.755061601968617,
+ "learning_rate": 8.950041394739168e-05,
+ "loss": 0.2747,
+ "num_input_tokens_seen": 17547384,
+ "step": 1690
+ },
+ {
+ "epoch": 0.5007385524372231,
+ "grad_norm": 6.30268931800531,
+ "learning_rate": 8.942542065048632e-05,
+ "loss": 0.3162,
+ "num_input_tokens_seen": 17599120,
+ "step": 1695
+ },
+ {
+ "epoch": 0.5022156573116692,
+ "grad_norm": 16.220873737158193,
+ "learning_rate": 8.935019214191672e-05,
+ "loss": 0.3904,
+ "num_input_tokens_seen": 17650984,
+ "step": 1700
+ },
+ {
+ "epoch": 0.5022156573116692,
+ "eval_loss": 0.3297054171562195,
+ "eval_runtime": 47.9781,
+ "eval_samples_per_second": 1.251,
+ "eval_steps_per_second": 0.313,
+ "num_input_tokens_seen": 17650984,
+ "step": 1700
  }
  ],
  "logging_steps": 5,
  "max_steps": 6770,
- "num_input_tokens_seen": 17130896,
+ "num_input_tokens_seen": 17650984,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -2963,7 +3052,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1130167189372928.0,
+ "total_flos": 1164428646285312.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dfb4b4312b0dec56488d15e8a46a372ea451bb29daae5dc2f31b7c95a1a9b038
+ oid sha256:157f40a64e5df8b268b803366ba3fce3e3b5e6e2f20d7c81bfc44726f0bee4e8
  size 7480
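`training_args.bin` is the pickled `TrainingArguments` for this run, so it can be reloaded to inspect the exact hyperparameters behind the checkpoint. A minimal sketch, assuming `transformers` is installed and the file has been downloaded (pickled files should only be loaded from trusted sources):

```python
# Minimal sketch: inspect the saved TrainingArguments (requires transformers).
import torch

training_args = torch.load(
    "last-checkpoint/training_args.bin",
    map_location="cpu",
    weights_only=False,  # a pickled TrainingArguments object, not plain tensors
)
print(type(training_args).__name__)
print(training_args.per_device_train_batch_size, training_args.save_steps)
```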