dh-mc committed on
Commit
de6246f
·
1 Parent(s): 679dc6a

upcast_layernorm: true for 4bit qlora

Browse files
llama-factory/config/qwen2_72b_lora_sft_4bit-p1.yaml CHANGED
@@ -9,6 +9,7 @@ lora_target: all
9
  quantization_bit: 4 # use 4-bit QLoRA
10
  loraplus_lr_ratio: 16.0 # use LoRA+ with lambda=16.0
11
  # use_unsloth: true # use UnslothAI's LoRA optimization for 2x faster training
 
12
 
13
  ### dataset
14
  dataset: alpaca_mac
 
9
  quantization_bit: 4 # use 4-bit QLoRA
10
  loraplus_lr_ratio: 16.0 # use LoRA+ with lambda=16.0
11
  # use_unsloth: true # use UnslothAI's LoRA optimization for 2x faster training
12
+ upcast_layernorm: true
13
 
14
  ### dataset
15
  dataset: alpaca_mac
llama-factory/config/qwen2_72b_lora_sft_4bit-p2.yaml CHANGED
@@ -9,6 +9,7 @@ lora_target: all
9
  quantization_bit: 4 # use 4-bit QLoRA
10
  loraplus_lr_ratio: 16.0 # use LoRA+ with lambda=16.0
11
  # use_unsloth: true # use UnslothAI's LoRA optimization for 2x faster training
 
12
 
13
  ### dataset
14
  dataset: alpaca_mac
 
9
  quantization_bit: 4 # use 4-bit QLoRA
10
  loraplus_lr_ratio: 16.0 # use LoRA+ with lambda=16.0
11
  # use_unsloth: true # use UnslothAI's LoRA optimization for 2x faster training
12
+ upcast_layernorm: true
13
 
14
  ### dataset
15
  dataset: alpaca_mac