facat committed on
Commit
e3eb48d
β€’
1 Parent(s): 9e51263

del device_map

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -69,20 +69,20 @@ def load_lora(lora_path, base_model="decapoda-research/llama-7b-hf"):
69
  model = LlamaForCausalLM.from_pretrained(
70
  base_model,
71
  # load_in_8bit=True,
72
- device_map=device_map,
73
  low_cpu_mem_usage=True,
74
  )
75
  print("Loading LoRA...")
76
  lora = PeftModel.from_pretrained(
77
  model,
78
  lora_path,
79
- device_map=device_map,
80
  )
81
  return lora
82
 
83
 
84
  base_model = "decapoda-research/llama-13b-hf"
85
- tokenizer = LlamaTokenizer.from_pretrained(base_model, device_map=device_map)
86
  # question = "ε¦‚ζžœδ»Šε€©ζ˜―ζ˜ŸζœŸδΊ”, ι‚£δΉˆεŽε€©ζ˜―ζ˜ŸζœŸε‡ ?"
87
  model = load_lora(lora_path="facat/alpaca-lora-cn-13b", base_model=base_model)
88
 
 
69
  model = LlamaForCausalLM.from_pretrained(
70
  base_model,
71
  # load_in_8bit=True,
72
+ # device_map=device_map,
73
  low_cpu_mem_usage=True,
74
  )
75
  print("Loading LoRA...")
76
  lora = PeftModel.from_pretrained(
77
  model,
78
  lora_path,
79
+ # device_map=device_map,
80
  )
81
  return lora
82
 
83
 
84
  base_model = "decapoda-research/llama-13b-hf"
85
+ tokenizer = LlamaTokenizer.from_pretrained(base_model)
86
  # question = "ε¦‚ζžœδ»Šε€©ζ˜―ζ˜ŸζœŸδΊ”, ι‚£δΉˆεŽε€©ζ˜―ζ˜ŸζœŸε‡ ?"
87
  model = load_lora(lora_path="facat/alpaca-lora-cn-13b", base_model=base_model)
88