facat committed on
Commit
9be8f93
β€’
1 Parent(s): e3eb48d

switch to 7b

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -81,10 +81,10 @@ def load_lora(lora_path, base_model="decapoda-research/llama-7b-hf"):
81
  return lora
82
 
83
 
84
- base_model = "decapoda-research/llama-13b-hf"
85
  tokenizer = LlamaTokenizer.from_pretrained(base_model)
86
  # question = "ε¦‚ζžœδ»Šε€©ζ˜―ζ˜ŸζœŸδΊ”, ι‚£δΉˆεŽε€©ζ˜―ζ˜ŸζœŸε‡ ?"
87
- model = load_lora(lora_path="facat/alpaca-lora-cn-13b", base_model=base_model)
88
 
89
  eval = lambda question, input, temperature, beams, max_token: evaluate(
90
  model,
 
81
  return lora
82
 
83
 
84
+ base_model = "decapoda-research/llama-7b-hf"
85
  tokenizer = LlamaTokenizer.from_pretrained(base_model)
86
  # question = "ε¦‚ζžœδ»Šε€©ζ˜―ζ˜ŸζœŸδΊ”, ι‚£δΉˆεŽε€©ζ˜―ζ˜ŸζœŸε‡ ?"
87
+ model = load_lora(lora_path="facat/alpaca-lora-cn", base_model=base_model)
88
 
89
  eval = lambda question, input, temperature, beams, max_token: evaluate(
90
  model,