ndn1954 committed on
Commit
c41ca31
1 Parent(s): 90fdd1d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -100,7 +100,7 @@ def select_llm() -> Union[ChatOpenAI, LlamaCpp]:
100
  Read user selection of parameters in Streamlit sidebar.
101
  """
102
  model_name = st.sidebar.radio("Choose LLM:",
103
- ("Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M",#"llama-2-7b-chat.ggmlv3.q2_K",
104
  "gpt-3.5-turbo-0613",
105
  "gpt-3.5-turbo-16k-0613",
106
  "gpt-4"))
@@ -228,8 +228,8 @@ def main() -> None:
228
 
229
  model_name, temperature = select_llm()
230
  #llm = load_llm(model_name, temperature)
231
- #url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q2_K.bin"
232
- url = "https://huggingface.co/TheBloke/Wizard-Vicuna-7B-Uncensored-GGML/raw/main/Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin"
233
  try:
234
  model_loc, file_size = dl_hf_model(url)
235
  except Exception as exc_:
 
100
  Read user selection of parameters in Streamlit sidebar.
101
  """
102
  model_name = st.sidebar.radio("Choose LLM:",
103
+ ("llama-2-7b-chat.ggmlv3.q2_K",#"Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M"
104
  "gpt-3.5-turbo-0613",
105
  "gpt-3.5-turbo-16k-0613",
106
  "gpt-4"))
 
228
 
229
  model_name, temperature = select_llm()
230
  #llm = load_llm(model_name, temperature)
231
+ url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/blob/main/llama-2-7b-chat.ggmlv3.q2_K.bin"
232
+ #url = "https://huggingface.co/TheBloke/Wizard-Vicuna-7B-Uncensored-GGML/raw/main/Wizard-Vicuna-7B-Uncensored.ggmlv3.q4_K_M.bin"
233
  try:
234
  model_loc, file_size = dl_hf_model(url)
235
  except Exception as exc_: