nblinh committed

Commit 322308e
1 Parent(s): 9175a17

End of training
README.md CHANGED
@@ -78,7 +78,7 @@ resume_from_checkpoint: null
 s2_attention: null
 sample_packing: false
 saves_per_epoch: 1
-sequence_len: 1024
+sequence_len: 2048
 strict: false
 tf32: false
 tokenizer_type: AutoTokenizer
@@ -103,7 +103,7 @@ xformers_attention: true
 
 This model is a fine-tuned version of [unsloth/OpenHermes-2.5-Mistral-7B](https://huggingface.co/unsloth/OpenHermes-2.5-Mistral-7B) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.0327
+- Loss: 1.0232
 
 ## Model description
 
@@ -135,7 +135,7 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:------:|:----:|:---------------:|
-| 0.5937        | 0.0002 | 10   | 1.0327          |
+| 0.6089        | 0.0002 | 10   | 1.0232          |
 
 
 ### Framework versions
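
The README change doubles `sequence_len` from 1024 to 2048 and records the improved eval loss (1.0327 to 1.0232). For orientation, below is a minimal sketch of loading a LoRA adapter like this one onto the base model with transformers and peft; the adapter path is a placeholder for this repository's id or a local checkout, and dtype/device handling is simplified.

```python
# Minimal sketch: apply this commit's LoRA adapter to the base model.
# `adapter_id` is a placeholder; substitute the actual repo id or local path.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "unsloth/OpenHermes-2.5-Mistral-7B"  # base model named in the README
adapter_id = "path/to/this-adapter-repo"       # placeholder, not from this commit

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16)

# PeftModel wraps the frozen base weights with the trained LoRA deltas.
model = PeftModel.from_pretrained(base, adapter_id)
model.eval()

inputs = tokenizer("Hello, how are you?", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```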
adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "v_proj",
     "o_proj",
-    "gate_proj",
     "k_proj",
-    "up_proj",
-    "v_proj",
+    "q_proj",
     "down_proj",
-    "q_proj"
+    "gate_proj",
+    "up_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:46ec8880499f89d5403fc45529870841b99bb6cab5cb8816f6a74363e5478652
+oid sha256:b85cfb6bb507fefdb2a74775fa20eaf4dda140e5d1240a6a9fc8ba39b081c4de
 size 167934026
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:57b54c4a5f3aaf004c53963acdbe2dc25b6be2b4f73c29ff21eca4d93cc77cec
+oid sha256:3c2136bdf3b08dda591d31ea9ec259466397f7eecda38e741c2119d4d17f20f7
 size 167832240
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:81d78f054753df589dbf89684430fa2bb4fa5a6f42a289a955cf765a35071f6c
+oid sha256:6502b390e3141ce975612fab060b8ed3fba765bdba7ec701f7f6a1d4ebf503a0
 size 6776
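
The three binary files above are git-LFS pointers: each diff swaps only the sha256 oid while the byte size stays the same, so the content changed but not its length. As a hedged illustration (assuming a local checkout with the real binaries fetched), the sketch below checks a downloaded file against its pointer using the new `training_args.bin` values from this commit.

```python
# Sketch: verify a fetched LFS file against its pointer's oid and size.
import hashlib
import os

def verify_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file's size and sha256 digest match the pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)
    return h.hexdigest() == expected_oid

# Values from the new training_args.bin pointer in this commit:
ok = verify_lfs_pointer(
    "training_args.bin",
    "6502b390e3141ce975612fab060b8ed3fba765bdba7ec701f7f6a1d4ebf503a0",
    6776,
)
print("pointer matches" if ok else "pointer mismatch")
```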