andysalerno committed
Commit 37292fc
1 Parent(s): 45ba067

Create axolotl.yml

Files changed (1)
  1. axolotl.yml +90 -0
axolotl.yml ADDED
@@ -0,0 +1,90 @@
+ base_model: ./mistralai/Mistral-7B-v0.1-chatml
+ model_type: MistralForCausalLM
+ tokenizer_type: LlamaTokenizer
+ is_mistral_derived_model: true
+
+ load_in_8bit: false
+ load_in_4bit: true
+ strict: false
+
+ datasets:
+   - path: andysalerno/ansalern-nectar-inputoutput
+     type:
+       field_instruction: input
+       field_output: output
+       format: "{instruction}"
+       no_input_format: "{instruction}"
+ dataset_prepared_path: last_run_prepared
+ val_set_size: 0.005
+ output_dir: ./qlora-out
+
+ adapter: qlora
+ lora_model_dir:
+
+ sequence_len: 8192
+ sample_packing: true # was true
+ eval_sample_packing: false
+ pad_to_sequence_len: true
+
+ lora_r: 32
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+ lora_modules_to_save: ['embed_tokens', 'lm_head']
+ lora_target_modules:
+   - gate_proj
+   - down_proj
+   - up_proj
+   - q_proj
+   - v_proj
+   - k_proj
+   - o_proj
+   - embed_tokens
+   - lm_head
+
+ wandb_project: axolotl
+ wandb_entity:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+
+ gradient_accumulation_steps: 4
+ micro_batch_size: 2
+ num_epochs: 1
+ optimizer: adamw_bnb_8bit
+ lr_scheduler: cosine
+ learning_rate: 0.00005
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: true
+ early_stopping_patience: 3
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ loss_watchdog_threshold: 5.0
+ loss_watchdog_patience: 3
+
+ warmup_steps: 0.1
+ eval_steps: 50
+ eval_table_size:
+ eval_table_max_new_tokens: 128
+ save_steps: 300
+ max_steps: 300
+ debug:
+ deepspeed:
+ weight_decay: 0.0
+ fsdp:
+ fsdp_config:
+ special_tokens:
+   bos_token: "<|im_start|>"
+   eos_token: "<|im_end|>"
+   unk_token: "<unk>"
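
For reference, a config in this shape is consumed by the axolotl CLI. A minimal usage sketch, assuming axolotl is installed and the file is saved as axolotl.yml alongside the base-model directory it references (the dataset path and output_dir above are the author's):

    # fine-tune with the QLoRA settings defined in the config
    accelerate launch -m axolotl.cli.train axolotl.yml

    # optionally, merge the trained adapter back into the base model afterwards
    python -m axolotl.cli.merge_lora axolotl.yml --lora_model_dir ./qlora-out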