Sephfox committed (verified) · Commit 00837e2 · Parent: b303379

Update app.py

Files changed (1): app.py (+4, -4)
app.py CHANGED
@@ -114,14 +114,14 @@ class LLMResponder:
         try:
             self.llm_tokenizer = AutoTokenizer.from_pretrained(model_name, config=dummy_config, trust_remote_code=True)
         except Exception as e:
-            print("Error loading tokenizer from", model_name, "; using fallback tokenizer.")
-            fallback_model = "decapoda-research/llama-7b-hf"
+            print(f"Error loading tokenizer from {model_name}; using fallback tokenizer.")
+            fallback_model = "sentence-transformers/all-MiniLM-L6-v2"
             self.llm_tokenizer = AutoTokenizer.from_pretrained(fallback_model, config=dummy_config, trust_remote_code=True)
         try:
             self.llm_model = AutoModelForCausalLM.from_pretrained(model_name, config=dummy_config, trust_remote_code=True)
         except Exception as e:
-            print("Error loading model from", model_name, "; using fallback model.")
-            fallback_model = "decapoda-research/llama-7b-hf"
+            print(f"Error loading model from {model_name}; using fallback model.")
+            fallback_model = "sentence-transformers/all-MiniLM-L6-v2"
             self.llm_model = AutoModelForCausalLM.from_pretrained(fallback_model, config=dummy_config, trust_remote_code=True)
         self.backend = "transformers"
 
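For context, here is a minimal, self-contained sketch of the try/except fallback pattern the patched code uses. It is an illustration, not the repository's code: the function name load_with_fallback and the constant FALLBACK_MODEL are invented for the example, and the dummy_config argument used in app.py is omitted so the snippet runs on its own.

from transformers import AutoModelForCausalLM, AutoTokenizer

# Fallback checkpoint chosen in this commit (constant name is illustrative).
FALLBACK_MODEL = "sentence-transformers/all-MiniLM-L6-v2"

def load_with_fallback(model_name: str):
    """Load tokenizer and model, falling back to FALLBACK_MODEL on any error."""
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    except Exception as e:
        print(f"Error loading tokenizer from {model_name}: {e}; using fallback tokenizer.")
        tokenizer = AutoTokenizer.from_pretrained(FALLBACK_MODEL, trust_remote_code=True)
    try:
        model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
    except Exception as e:
        print(f"Error loading model from {model_name}: {e}; using fallback model.")
        model = AutoModelForCausalLM.from_pretrained(FALLBACK_MODEL, trust_remote_code=True)
    return tokenizer, model

# Usage sketch (model id is a placeholder):
# tokenizer, model = load_with_fallback("some-org/some-model")

Note that the new fallback is a small sentence-embedding encoder rather than a causal chat model, so the swap mainly keeps loading from failing outright; it does not promise comparable generation quality to the requested model.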