Text Generation
Transformers
Safetensors
mistral
axolotl
finetune
qlora
conversational
Inference Endpoints
text-generation-inference
Weyaxi committed on
Commit 994e072
1 Parent(s): cfa9b51

upload adapter files

adapter/README.md ADDED
@@ -0,0 +1,181 @@
+ ---
+ license: apache-2.0
+ library_name: peft
+ tags:
+ - axolotl
+ - generated_from_trainer
+ base_model: openchat/openchat-3.5-0106
+ model-index:
+ - name: newton-lora
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
+ <details><summary>See axolotl config</summary>
+
+ axolotl version: `0.4.0`
+ ```yaml
+ base_model: openchat/openchat-3.5-0106
+ model_type: MistralForCausalLM
+ tokenizer_type: LlamaTokenizer
+ is_mistral_derived_model: true
+
+ load_in_8bit: false
+ load_in_4bit: true
+ strict: false
+
+
+ datasets:
+   - path: merged_all.json
+     type:
+       field_instruction: instruction
+       field_output: output
+
+       format: "GPT4 Correct User: {instruction}<|end_of_turn|>GPT4 Correct Assistant:"
+       no_input_format: "GPT4 Correct User: {instruction}<|end_of_turn|>GPT4 Correct Assistant:"
+
+
+ dataset_prepared_path: last_run_prepared
+ val_set_size: 0.01 # not sure
+ output_dir: ./newton
+
+ adapter: qlora
+ lora_model_dir:
+
+ sequence_len: 8192
+ sample_packing: true
+ pad_to_sequence_len: true
+
+ lora_r: 128
+ lora_alpha: 64
+ lora_dropout: 0.05
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+ lora_target_modules:
+   - gate_proj
+   - down_proj
+   - up_proj
+   - q_proj
+   - v_proj
+   - k_proj
+   - o_proj
+ lora_modules_to_save:
+   - embed_tokens
+   - lm_head
+
+ wandb_project: huggingface
+ wandb_entity:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+
+ hub_model_id: Weyaxi/newton-lora
+ save_safetensors: true
+
+ # change #
+ gradient_accumulation_steps: 12
+ micro_batch_size: 6
+ num_epochs: 2
+ optimizer: adamw_bnb_8bit
+ lr_scheduler: cosine
+ learning_rate: 0.0002
+ # change #
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: true
+ fp16: false
+ tf32: false
+
+ gradient_checkpointing: true
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ warmup_steps: 10 # not sure
+
+ saves_per_epoch: 2
+
+ evals_per_epoch: 4
+ eval_table_size:
+ eval_table_max_new_tokens: 128
+
+ debug:
+ deepspeed:
+ weight_decay: 0.1 # not sure
+ fsdp:
+ fsdp_config:
+ special_tokens:
+   bos_token: "<s>"
+   eos_token: "</s>"
+   unk_token: "<unk>"
+ tokens:
+   - "<|end_of_turn|>"
+   - "<|pad_0|>"
+
+
+ ```
+
+ </details><br>
+
+ # newton-lora
+
+ This model is a fine-tuned version of [openchat/openchat-3.5-0106](https://huggingface.co/openchat/openchat-3.5-0106) on the `merged_all.json` dataset (see the axolotl config above).
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0800
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0002
+ - train_batch_size: 6
+ - eval_batch_size: 6
+ - seed: 42
+ - gradient_accumulation_steps: 12
+ - total_train_batch_size: 72
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 10
+ - num_epochs: 2
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 0.6925 | 0.02 | 1 | 1.3667 |
+ | 0.5622 | 0.25 | 16 | 0.3390 |
+ | 0.5269 | 0.5 | 32 | 0.1395 |
+ | 0.5343 | 0.75 | 48 | 0.1048 |
+ | 0.515 | 1.01 | 64 | 0.0904 |
+ | 0.3971 | 1.24 | 80 | 0.0854 |
+ | 0.3889 | 1.49 | 96 | 0.0820 |
+ | 0.3864 | 1.74 | 112 | 0.0800 |
+
+
+ ### Framework versions
+
+ - PEFT 0.7.2.dev0
+ - Transformers 4.37.0
+ - PyTorch 2.1.2+cu118
+ - Datasets 2.16.1
+ - Tokenizers 0.15.0
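
For context: a config like the one above is normally launched with axolotl's trainer CLI (`accelerate launch -m axolotl.cli.train <config>.yml`), and the uploaded adapter can be loaded back onto the 4-bit base model roughly as follows. This is a minimal sketch, assuming `transformers`, `peft`, and `bitsandbytes` are installed; the repo ids and quantization settings come from the files in this commit, which live under the `adapter/` subfolder.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

# Quantization settings mirror quantization_config in adapter/config.json.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

base = AutoModelForCausalLM.from_pretrained(
    "openchat/openchat-3.5-0106",
    quantization_config=bnb_config,
    device_map="auto",
)
# The adapter files in this commit sit under adapter/, hence the subfolder.
model = PeftModel.from_pretrained(base, "Weyaxi/newton-lora", subfolder="adapter")
tokenizer = AutoTokenizer.from_pretrained("Weyaxi/newton-lora", subfolder="adapter")
```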
adapter/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "openchat/openchat-3.5-0106",
+   "bias": "none",
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 64,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "embed_tokens",
+     "lm_head"
+   ],
+   "peft_type": "LORA",
+   "r": 128,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "up_proj",
+     "k_proj",
+     "q_proj",
+     "v_proj",
+     "gate_proj",
+     "o_proj",
+     "down_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
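
For reference, the JSON above corresponds to roughly this `peft.LoraConfig` (a sketch, assuming PEFT ~0.7 as listed in the card's framework versions):

```python
from peft import LoraConfig

# Mirrors adapter_config.json: rank-128 LoRA on every linear projection,
# with the (resized) embedding and output head trained and saved in full.
lora_config = LoraConfig(
    r=128,
    lora_alpha=64,
    lora_dropout=0.05,
    bias="none",
    target_modules=["up_proj", "k_proj", "q_proj", "v_proj",
                    "gate_proj", "o_proj", "down_proj"],
    modules_to_save=["embed_tokens", "lm_head"],
    task_type="CAUSAL_LM",
)
```

Saving `embed_tokens` and `lm_head` in full is what makes the adapter unusually large (~1.87 GB, per the safetensors pointer below).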
adapter/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40fd555db097a84bee437bf2145bbb963fea1baaafb24e027ec45684e122e966
+ size 1866559560
adapter/added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "<|end_of_turn|>": 32000,
+   "<|pad_0|>": 32001
+ }
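
These two ids sit directly above the base Mistral vocabulary, which is why `config.json` below records `vocab_size: 32002` and why `embed_tokens`/`lm_head` appear in `modules_to_save`. A hypothetical sanity check:

```python
from transformers import AutoTokenizer

# Hypothetical check: the added tokens should resolve to the ids above.
tok = AutoTokenizer.from_pretrained("Weyaxi/newton-lora", subfolder="adapter")
assert tok.convert_tokens_to_ids("<|end_of_turn|>") == 32000
assert tok.convert_tokens_to_ids("<|pad_0|>") == 32001
```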
adapter/config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "_name_or_path": "openchat/openchat-3.5-0106",
+   "architectures": [
+     "MistralForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 8192,
+   "model_type": "mistral",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "quantization_config": {
+     "bnb_4bit_compute_dtype": "bfloat16",
+     "bnb_4bit_quant_type": "nf4",
+     "bnb_4bit_use_double_quant": true,
+     "llm_int8_enable_fp32_cpu_offload": false,
+     "llm_int8_has_fp16_weight": false,
+     "llm_int8_skip_modules": null,
+     "llm_int8_threshold": 6.0,
+     "load_in_4bit": true,
+     "load_in_8bit": false,
+     "quant_method": "bitsandbytes"
+   },
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 10000.0,
+   "sliding_window": 4096,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.37.0",
+   "use_cache": false,
+   "vocab_size": 32002
+ }
adapter/special_tokens_map.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "additional_special_tokens": [
+     "<|end_of_turn|>",
+     "<|pad_0|>"
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
adapter/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
adapter/tokenizer_config.json ADDED
@@ -0,0 +1,64 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "32000": {
+       "content": "<|end_of_turn|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "32001": {
+       "content": "<|pad_0|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<|end_of_turn|>",
+     "<|pad_0|>"
+   ],
+   "bos_token": "<s>",
+   "chat_template": "{{ bos_token }}{% for message in messages %}{{ 'GPT4 Correct ' + message['role'].title() + ': ' + message['content'] + '<|end_of_turn|>'}}{% endfor %}{% if add_generation_prompt %}{{ 'GPT4 Correct Assistant:' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "trust_remote_code": false,
+   "unk_token": "<unk>",
+   "use_default_system_prompt": true,
+   "use_fast": true
+ }
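
The `chat_template` above encodes the same "GPT4 Correct" turn format used in the axolotl config. A minimal sketch of rendering it (the example message is hypothetical):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Weyaxi/newton-lora", subfolder="adapter")
messages = [{"role": "user", "content": "Hello!"}]  # illustrative only
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
# Expected, per the template:
# "<s>GPT4 Correct User: Hello!<|end_of_turn|>GPT4 Correct Assistant:"
print(prompt)
```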
adapter/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f5ad4b647c026438c48baa224d24e9d26d37d4f6e34861a9fc68e554a695160
+ size 5240