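# Hydra config for src.models_clm.peft_models.get_peft_model_with_resize_embedding:
# a LLaMA-7B causal LM whose token embedding is resized to `vocab_size` and which
# is wrapped with the LoRA adapter described in `peft_config`. Each `_target_`
# names the callable Hydra should build; its sibling keys become that call's arguments.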
_target_: src.models_clm.peft_models.get_peft_model_with_resize_embedding
model:
  _target_: src.models_clm.modeling_llama_xformer.LlamaForCausalLM.from_pretrained
  # _target_: transformers.LlamaForCausalLM.from_pretrained
  pretrained_model_name_or_path: luodian/llama-7b-hf
  low_cpu_mem_usage: True
peft_config:
  _target_: peft.LoraConfig
  _convert_: object
  r: 16
  lora_alpha: 32
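  # Modules trained in full (no low-rank decomposition) and saved alongside the
  # LoRA weights; here the LLaMA RMSNorm layers.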
  modules_to_save:
    # - embed_tokens
    # - lm_head
    - input_layernorm
    - post_attention_layernorm
    - norm
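  # Linear layers that receive LoRA adapters: all attention and MLP projections.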
  target_modules:
    - q_proj
    - v_proj
    - k_proj
    - o_proj
    - gate_proj
    - down_proj
    - up_proj
  task_type: CAUSAL_LM
  lora_dropout: 0.05

vocab_size: 32066
# _target_: src.models_clm.peft_models.get_model_with_resize_embedding
# model:
#   # _target_: src.models_clm.modeling_llama_xformer.LlamaForCausalLM.from_pretrained
#   _target_: transformers.LlamaForCausalLM.from_pretrained
#   pretrained_model_name_or_path: /apdcephfs_cq3/share_1290939/sijiezhao/model_hub/Llama-2-7b-hf
#   low_cpu_mem_usage: True

# vocab_size: 32066
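
# Usage sketch (not part of the config): how a Hydra-style config like this is
# typically consumed. The entry point and the `config_path`/`config_name` values
# are assumptions for illustration; only the `_target_` convention comes from the
# file itself. Note that `_convert_: object` makes Hydra pass plain Python
# containers (e.g. the `target_modules` list) to `peft.LoraConfig` rather than
# OmegaConf ones.
#
#   import hydra
#   from omegaconf import DictConfig
#
#   @hydra.main(config_path="configs", config_name="llama_lora", version_base=None)
#   def main(cfg: DictConfig) -> None:
#       # Recursively instantiates every `_target_`: LlamaForCausalLM.from_pretrained
#       # and peft.LoraConfig are built first, then passed (together with `vocab_size`)
#       # to get_peft_model_with_resize_embedding.
#       model = hydra.utils.instantiate(cfg)
#       ...
#
#   if __name__ == "__main__":
#       main()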