dh-mc committed on
Commit
272aed7
·
1 Parent(s): 73f796f

reduce batch size

Browse files
llama-factory/config/llama3-8b_lora_sft_bf16-p1_en.yaml CHANGED
@@ -27,7 +27,7 @@ plot_loss: true
27
  # overwrite_output_dir: true
28
 
29
  ### train
30
- per_device_train_batch_size: 16
31
  gradient_accumulation_steps: 8
32
  learning_rate: 1.0e-4
33
  num_train_epochs: 1.0
 
27
  # overwrite_output_dir: true
28
 
29
  ### train
30
+ per_device_train_batch_size: 8
31
  gradient_accumulation_steps: 8
32
  learning_rate: 1.0e-4
33
  num_train_epochs: 1.0
llama-factory/config/llama3-8b_lora_sft_bf16-p2_en.yaml CHANGED
@@ -27,7 +27,7 @@ plot_loss: true
27
  # overwrite_output_dir: true
28
 
29
  ### train
30
- per_device_train_batch_size: 16
31
  gradient_accumulation_steps: 8
32
  learning_rate: 1.0e-4
33
  num_train_epochs: 1.0
 
27
  # overwrite_output_dir: true
28
 
29
  ### train
30
+ per_device_train_batch_size: 8
31
  gradient_accumulation_steps: 8
32
  learning_rate: 1.0e-4
33
  num_train_epochs: 1.0