chargoddard committed · commit 5c7d89e
1 parent: cdd06a6

Add files

Files changed:
- README.md +33 -0
- adapter_config.json +25 -0
- adapter_model.bin +3 -0
README.md
ADDED
@@ -0,0 +1,33 @@
---
library_name: peft
datasets:
- EleutherAI/wikitext_document_level
language:
- en
---
[<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)

LLaMA 33B fine-tuned on `wikitext_document_level` with a combination of both linear and NTK-aware RoPE scaling (a sketch of the combined scaling follows below).

Trained with alpha=4, scale=2.
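The card does not include the patch used for this, so as a non-authoritative illustration, here is a minimal sketch of how linear position interpolation (scale=2) can be combined with NTK-aware base adjustment (alpha=4) in a LLaMA-style rotary embedding. The class and parameter names are hypothetical, not from this repo:

```python
import torch

class ScaledRotaryEmbedding(torch.nn.Module):
    """Hypothetical sketch: linear + NTK-aware RoPE scaling combined."""

    def __init__(self, dim: int, base: float = 10000.0,
                 alpha: float = 4.0, scale: float = 2.0):
        super().__init__()
        self.scale = scale
        # NTK-aware scaling: stretch the rotary base so the low-frequency
        # components span a longer context (alpha=4 here).
        base = base * alpha ** (dim / (dim - 2))
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)

    def forward(self, seq_len: int):
        # Linear scaling (position interpolation): compress position indices
        # by `scale` so 2x-longer sequences map into the trained range.
        t = torch.arange(seq_len, dtype=torch.float32,
                         device=self.inv_freq.device) / self.scale
        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)
        return emb.cos(), emb.sin()
```

The cos/sin tables would be applied to the query and key projections exactly as in stock LLaMA attention; only the frequency table and the position indices change.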
<img src="perplexity.png" alt="Perplexity Graph" />

## Training procedure

The following `bitsandbytes` quantization config was used during training (reproduced as a code sketch after the list):

- load_in_8bit: False
- load_in_4bit: True
- llm_int8_threshold: 6.0
- llm_int8_skip_modules: None
- llm_int8_enable_fp32_cpu_offload: False
- llm_int8_has_fp16_weight: False
- bnb_4bit_quant_type: nf4
- bnb_4bit_use_double_quant: True
- bnb_4bit_compute_dtype: bfloat16
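For reference, the same settings expressed through `transformers`' `BitsAndBytesConfig`; a sketch assuming a recent `transformers`/`bitsandbytes` install rather than the repo's actual training script:

```python
import torch
from transformers import BitsAndBytesConfig

# Sketch: the quantization config listed above, written out explicitly.
# Values are transcribed from the model card, not from the training code.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
```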
### Framework versions

- PEFT 0.4.0.dev0
adapter_config.json
ADDED
@@ -0,0 +1,25 @@
{
  "base_model_name_or_path": "huggyllama/llama-30b",
  "bias": "none",
  "fan_in_fan_out": null,
  "inference_mode": true,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
  "lora_alpha": 128,
  "lora_dropout": 0.05,
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 64,
  "revision": null,
  "target_modules": [
    "o_proj",
    "gate_proj",
    "v_proj",
    "q_proj",
    "k_proj",
    "up_proj",
    "down_proj"
  ],
  "task_type": "CAUSAL_LM"
}
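This config describes a rank-64 LoRA (alpha 128, dropout 0.05) over every attention and MLP projection of `huggyllama/llama-30b`. A minimal loading sketch with `peft`; `"path/to/this-adapter"` is a placeholder for this repository's id or a local checkout:

```python
import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

# Sketch: load the base model, then attach this LoRA adapter.
# "path/to/this-adapter" is a placeholder, not a real repo id.
base = AutoModelForCausalLM.from_pretrained(
    "huggyllama/llama-30b",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "path/to/this-adapter")
```

Note that the adapter alone does not change position handling; the RoPE scaling described in the README would still need to be applied to the base model separately.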
adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:51c5330ea8267a9a608db609ab12856711e2d3bd86a38a9944a28b810674be50
size 1950654765