dh-mc committed on
Commit 1f778a0 · 1 Parent(s): b2d230a

llama3 finetune config

llama-factory/config/llama3-8b_lora_sft_bf16-p1.yaml CHANGED
@@ -22,16 +22,16 @@ preprocessing_num_workers: 16
 ### output
 output_dir: saves/llama3-8b/lora/sft_bf16_p1_full
 logging_steps: 10
-save_steps: 88
+save_steps: 175
 plot_loss: true
 overwrite_output_dir: true
 # resume_from_checkpoint: true
 
 ### train
-per_device_train_batch_size: 32
+per_device_train_batch_size: 16
 gradient_accumulation_steps: 8
 learning_rate: 1.0e-4
-num_train_epochs: 6.0
+num_train_epochs: 3.0
 lr_scheduler_type: cosine
 warmup_ratio: 0.1
 bf16: true
@@ -41,7 +41,7 @@ ddp_timeout: 180000000
 val_size: 0.1
 per_device_eval_batch_size: 1
 eval_strategy: steps
-eval_steps: 88
+eval_steps: 175
 
 report_to: wandb
 run_name: llama3_8b_p1_full # optional
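
The new save_steps/eval_steps value of 175 appears to track the halved effective batch size, keeping checkpoints and evals at roughly once per epoch. A minimal sketch of that arithmetic, assuming a single GPU and the max_samples of 25000 with the 0.1 validation split from this config (the GPU count is an assumption, not stated in the commit):

import math

# Values taken from the config above; num_gpus is assumed.
max_samples = 25_000
val_size = 0.1
num_gpus = 1                      # assumption; scale effective_batch by the real GPU count
per_device_train_batch_size = 16  # was 32 before this commit
gradient_accumulation_steps = 8

train_samples = int(max_samples * (1 - val_size))  # ~22,500
effective_batch = per_device_train_batch_size * gradient_accumulation_steps * num_gpus
steps_per_epoch = math.ceil(train_samples / effective_batch)

print(effective_batch)   # 128 now, 256 before the commit
print(steps_per_epoch)   # ~176 now, ~88 before, so save/eval every 175 steps is roughly once per epoch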
llama-factory/config/llama3-8b_lora_sft_bf16-p2.yaml ADDED
@@ -0,0 +1,47 @@
+### model
+model_name_or_path: FlagAlpha/Llama3-Chinese-8B-Instruct
+
+### method
+stage: sft
+do_train: true
+finetuning_type: lora
+lora_target: all
+# quantization_bit: 4 # use 4-bit QLoRA
+loraplus_lr_ratio: 16.0 # use LoRA+ with lambda=16.0
+# use_unsloth: true # use UnslothAI's LoRA optimization for 2x faster training
+upcast_layernorm: true
+
+### dataset
+dataset: alpaca_mgtv_p2
+template: llama3
+cutoff_len: 4096
+max_samples: 25000
+overwrite_cache: true
+preprocessing_num_workers: 16
+
+### output
+output_dir: saves/llama3-8b/lora/sft_bf16_p2_full
+logging_steps: 10
+save_steps: 175
+plot_loss: true
+overwrite_output_dir: true
+# resume_from_checkpoint: true
+
+### train
+per_device_train_batch_size: 16
+gradient_accumulation_steps: 8
+learning_rate: 1.0e-4
+num_train_epochs: 3.0
+lr_scheduler_type: cosine
+warmup_ratio: 0.1
+bf16: true
+ddp_timeout: 180000000
+
+### eval
+val_size: 0.1
+per_device_eval_batch_size: 1
+eval_strategy: steps
+eval_steps: 175
+
+report_to: wandb
+run_name: llama3_8b_p2_full # optional
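
The p2 file mirrors the p1 settings apart from the dataset, output directory, and run name. A quick sanity check before launching could load both YAML files and confirm the shared hyperparameters still agree; a minimal sketch, assuming PyYAML is installed and the repository layout shown in the file paths above:

import yaml  # PyYAML; assumed available for this sketch

with open("llama-factory/config/llama3-8b_lora_sft_bf16-p1.yaml") as f:
    p1 = yaml.safe_load(f)
with open("llama-factory/config/llama3-8b_lora_sft_bf16-p2.yaml") as f:
    p2 = yaml.safe_load(f)

# Fields expected to differ between the two phases.
expected_diff = {"dataset", "output_dir", "run_name"}

mismatches = {
    key: (p1.get(key), p2.get(key))
    for key in set(p1) | set(p2)
    if p1.get(key) != p2.get(key) and key not in expected_diff
}
print(mismatches or "p1 and p2 agree on all shared hyperparameters")

LLaMA-Factory configs like these are typically launched with llamafactory-cli train <config>.yaml; the launch command itself is not part of this commit.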