Delta-Vector committed on
Commit 6029823 · verified · 1 Parent(s): 5a10316

Update GnX.yml

Files changed (1)
  1. GnX.yml +80 -80
GnX.yml CHANGED
@@ -1,81 +1,81 @@
-base_model: Delta-Vector/Holland-4B
-model_type: AutoModelForCausalLM
-tokenizer_type: AutoTokenizer
-
-load_in_8bit: false
-load_in_4bit: false
-strict: false
-
-datasets:
-  - path: NewEden/xlam-function-calling-60k-shareGPT
-    type: sharegpt
-    conversation: chatml
-  - path: gardner/glaive-function-calling-v2-sharegpt
-    type: sharegpt
-    conversation: chatml
-
-chat_template: chatml
-
-val_set_size: 0.01
-output_dir: ./outputs/out
-
-adapter:
-lora_r:
-lora_alpha:
-lora_dropout:
-lora_target_linear:
-
-sequence_len: 16384
-# sequence_len: 32768
-sample_packing: true
-eval_sample_packing: false
-pad_to_sequence_len: true
-
-plugins:
-  - axolotl.integrations.liger.LigerPlugin
-liger_rope: true
-liger_rms_norm: true
-liger_swiglu: true
-liger_fused_linear_cross_entropy: true
-
-wandb_project: GnX Func Calling
-wandb_entity:
-wandb_watch:
-wandb_name: Func Calling GnX
-wandb_log_model:
-
-gradient_accumulation_steps: 32
-micro_batch_size: 1
-num_epochs: 2
-optimizer: adamw_bnb_8bit
-lr_scheduler: cosine
-learning_rate: 0.00002
-weight_decay: 0.05
-
-train_on_inputs: false
-group_by_length: false
-bf16: auto
-fp16:
-tf32: true
-
-gradient_checkpointing: true
-early_stopping_patience:
-resume_from_checkpoint:
-local_rank:
-logging_steps: 1
-xformers_attention:
-flash_attention: true
-
-warmup_ratio: 0.1
-evals_per_epoch: 4
-eval_table_size:
-eval_max_new_tokens: 128
-saves_per_epoch: 1
-
-debug:
-deepspeed:
-fsdp:
-fsdp_config:
-
-special_tokens:
+base_model: Delta-Vector/Holland-4B
+model_type: AutoModelForCausalLM
+tokenizer_type: AutoTokenizer
+
+load_in_8bit: false
+load_in_4bit: false
+strict: false
+
+datasets:
+  - path: NewEden/xlam-function-calling-60k-shareGPT
+    type: sharegpt
+    conversation: chatml
+  - path: gardner/glaive-function-calling-v2-sharegpt
+    type: sharegpt
+    conversation: chatml
+
+chat_template: chatml
+
+val_set_size: 0.01
+output_dir: ./outputs/out
+
+adapter:
+lora_r:
+lora_alpha:
+lora_dropout:
+lora_target_linear:
+
+sequence_len: 16384
+# sequence_len: 32768
+sample_packing: true
+eval_sample_packing: false
+pad_to_sequence_len: true
+
+plugins:
+  - axolotl.integrations.liger.LigerPlugin
+liger_rope: true
+liger_rms_norm: true
+liger_swiglu: true
+liger_fused_linear_cross_entropy: true
+
+wandb_project: GnX Func Calling
+wandb_entity:
+wandb_watch:
+wandb_name: Func Calling GnX
+wandb_log_model:
+
+gradient_accumulation_steps: 32
+micro_batch_size: 1
+num_epochs: 2
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+learning_rate: 0.00002
+weight_decay: 0.05
+
+train_on_inputs: false
+group_by_length: false
+bf16: auto
+fp16:
+tf32: true
+
+gradient_checkpointing: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention: true
+
+warmup_ratio: 0.1
+evals_per_epoch: 4
+eval_table_size:
+eval_max_new_tokens: 128
+saves_per_epoch: 1
+
+debug:
+deepspeed: deepspeed_configs/zero1.json
+fsdp:
+fsdp_config:
+
+special_tokens:
   pad_token: <|finetune_right_pad_id|>
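
The substantive change here is the deepspeed key, which now points at deepspeed_configs/zero1.json, the ZeRO stage 1 config that ships with the axolotl repo. For orientation, a minimal ZeRO-1 DeepSpeed config generally looks like the sketch below; the field values are illustrative assumptions, not the verbatim contents of that file:

    {
      "zero_optimization": {
        "stage": 1,
        "overlap_comm": true
      },
      "bf16": { "enabled": "auto" },
      "fp16": { "enabled": "auto" },
      "gradient_accumulation_steps": "auto",
      "gradient_clipping": "auto",
      "train_batch_size": "auto",
      "train_micro_batch_size_per_gpu": "auto",
      "wall_clock_breakdown": false
    }

When training is launched as usual (e.g. accelerate launch -m axolotl.cli.train GnX.yml), axolotl hands this file to DeepSpeed through the Hugging Face Trainer integration, and the "auto" fields are resolved at startup from the YAML above (micro_batch_size: 1, gradient_accumulation_steps: 32, bf16: auto).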