End of training
- README.md +3 -3
- adapter_config.json +4 -4
- adapter_model.bin +1 -1
- adapter_model.safetensors +1 -1
- training_args.bin +1 -1
README.md
CHANGED
@@ -78,7 +78,7 @@ resume_from_checkpoint: null
 s2_attention: null
 sample_packing: false
 saves_per_epoch: 1
-sequence_len:
+sequence_len: 2048
 strict: false
 tf32: false
 tokenizer_type: AutoTokenizer
@@ -103,7 +103,7 @@ xformers_attention: true
 
 This model is a fine-tuned version of [unsloth/OpenHermes-2.5-Mistral-7B](https://huggingface.co/unsloth/OpenHermes-2.5-Mistral-7B) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.
+- Loss: 1.0232
 
 ## Model description
 
@@ -135,7 +135,7 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:------:|:----:|:---------------:|
-| 0.
+| 0.6089 | 0.0002 | 10 | 1.0232 |
 
 
 ### Framework versions
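The README describes a LoRA adapter for unsloth/OpenHermes-2.5-Mistral-7B, so the files in this commit are meant to be loaded with peft on top of the base model. A minimal sketch of that usage (the adapter repo id below is a placeholder, not named anywhere in this commit):

```python
# Minimal sketch: attach this commit's LoRA adapter to the base model with peft.
# ADAPTER_REPO is a hypothetical placeholder -- substitute the actual Hub repo id.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE = "unsloth/OpenHermes-2.5-Mistral-7B"
ADAPTER_REPO = "user/openhermes-2.5-mistral-7b-lora"  # placeholder

tokenizer = AutoTokenizer.from_pretrained(BASE)
base_model = AutoModelForCausalLM.from_pretrained(BASE)
# Loads adapter_config.json and adapter_model.safetensors from the adapter repo.
model = PeftModel.from_pretrained(base_model, ADAPTER_REPO)
```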
adapter_config.json
CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "v_proj",
     "o_proj",
-    "gate_proj",
     "k_proj",
-    "
-    "v_proj",
+    "q_proj",
     "down_proj",
-    "
+    "gate_proj",
+    "up_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
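For reference, the updated target_modules list corresponds to a peft LoraConfig along these lines; the rank and alpha values are illustrative assumptions, since this hunk does not show them:

```python
# Sketch of a LoraConfig matching the updated target_modules above.
# r and lora_alpha are illustrative, not taken from this diff.
from peft import LoraConfig

config = LoraConfig(
    r=16,           # illustrative rank
    lora_alpha=32,  # illustrative scaling factor
    target_modules=["v_proj", "o_proj", "k_proj", "q_proj",
                    "down_proj", "gate_proj", "up_proj"],
    task_type="CAUSAL_LM",
    use_dora=False,
)
```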
adapter_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:b85cfb6bb507fefdb2a74775fa20eaf4dda140e5d1240a6a9fc8ba39b081c4de
 size 167934026
adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:3c2136bdf3b08dda591d31ea9ec259466397f7eecda38e741c2119d4d17f20f7
 size 167832240
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:6502b390e3141ce975612fab060b8ed3fba765bdba7ec701f7f6a1d4ebf503a0
 size 6776
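The three binary files above are Git LFS pointers: the repository stores only each object's sha256 and byte size, while the weights themselves live in LFS storage. A quick sketch of checking that a downloaded artifact matches the oid recorded in its pointer (the file name mirrors this commit):

```python
# Verify a downloaded LFS object against the sha256 from its pointer file.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in chunks so large weight files fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "3c2136bdf3b08dda591d31ea9ec259466397f7eecda38e741c2119d4d17f20f7"
assert sha256_of("adapter_model.safetensors") == expected
```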