ncbateman committed
Commit
8c7c087
1 Parent(s): 061a4da

Training in progress, step 25, checkpoint

last-checkpoint/README.md CHANGED
@@ -1,5 +1,5 @@
  ---
- base_model: unsloth/Llama-3.2-1B-Instruct
+ base_model: unsloth/Llama-3.2-3B-Instruct
  library_name: peft
  ---
 
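With this checkpoint the adapter's base model moves from unsloth/Llama-3.2-1B-Instruct to unsloth/Llama-3.2-3B-Instruct, so the LoRA weights in last-checkpoint/ now have to be attached to the 3B base when loaded. A minimal sketch, assuming standard transformers + peft usage (the local directory path simply mirrors this repo's layout; nothing here comes from the training script itself):

```python
# Minimal sketch: attach the LoRA adapter in last-checkpoint/ to the updated 3B base model.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "unsloth/Llama-3.2-3B-Instruct"  # new base_model from this commit
adapter_dir = "last-checkpoint"            # holds adapter_config.json and adapter_model.safetensors

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype="auto")

# PeftModel reads adapter_config.json and applies adapter_model.safetensors on top of the base weights.
model = PeftModel.from_pretrained(base_model, adapter_dir)
model.eval()
```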
last-checkpoint/adapter_config.json CHANGED
@@ -1,7 +1,7 @@
  {
  "alpha_pattern": {},
  "auto_mapping": null,
- "base_model_name_or_path": "unsloth/Llama-3.2-1B-Instruct",
+ "base_model_name_or_path": "unsloth/Llama-3.2-3B-Instruct",
  "bias": "none",
  "fan_in_fan_out": null,
  "inference_mode": true,
@@ -20,13 +20,13 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "up_proj",
+ "o_proj",
+ "q_proj",
  "v_proj",
+ "up_proj",
+ "gate_proj",
  "down_proj",
- "k_proj",
- "q_proj",
- "o_proj",
- "gate_proj"
+ "k_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:271a55b308cc7682a873c6a99c9cda26d6ab71e97899f74b1d4a08129d2aa236
- size 45118424
+ oid sha256:8e681c7412ef2557f51f437204f7dbbccb418dccefea52569f201f7a05c61feb
+ size 97307544
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f645a4cca6acce995a28c6a73628ab6507e4d274200076d1a497788422b8df35
- size 23159546
+ oid sha256:196d52d70a6910ff8c67869e7cbbe76c44d57cc489019ff6d2baf29ba21ca21c
+ size 49846260
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:898bf75a87a6acd77c2a3359f2976e804365bed481ed60f6d31384f9b04f1767
+ oid sha256:6bec6c83fbb7d43296cc5ab0e300576282a47308ba5787731efc1f099f27e291
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5cef1807e34750e90e3b15895beee76987ca821b1c92ed984e880432329fc5d1
+ oid sha256:814b79b47e12bde76b22a1ac4fd2f1e7ddf84c332d0b19c3538a1fad1c6cc96e
  size 1064
last-checkpoint/trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff
 
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:538ddb675c19e11b2b5d087f2549e85b6180656767f11457e34a8f69d9e78e58
+ oid sha256:a8524cfa1063fe98d8aea167ded37880cba9a9019662d453eab8ed82b8700126
  size 6648
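Together, optimizer.pt, scheduler.pt, rng_state.pth, trainer_state.json and training_args.bin are the pieces of state the transformers Trainer restores when a run is resumed. A hedged sketch of picking the run back up from this directory; the model and dataset objects are placeholders, since the training script itself is not part of this commit:

```python
# Hedged sketch: resuming training from last-checkpoint/ with the transformers Trainer.
# optimizer.pt, scheduler.pt, rng_state.pth, trainer_state.json and training_args.bin are the
# files Trainer reads back when resume_from_checkpoint points at this directory.
from transformers import Trainer, TrainingArguments

trainer = Trainer(
    model=model,                  # placeholder: the PEFT-wrapped 3B model (see loading sketch above)
    args=TrainingArguments(output_dir="outputs"),
    train_dataset=train_dataset,  # placeholder: the dataset used for the original run
)

# Restores optimizer/scheduler/RNG state and continues from step 25.
trainer.train(resume_from_checkpoint="last-checkpoint")
```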