djward888 committed on
Commit
f11f943
1 Parent(s): 2f84a7c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -3,7 +3,7 @@ from llama_cpp import Llama
3
 
4
  llm = Llama(model_path="model.gguf", n_ctx=8000, n_threads=2, chat_format="chatml")
5
 
6
- def generate(message, history, do_sample=True, temperature=0.7,max_tokens=4000, top_p=0.9):
7
  system_prompt = """You are an advanced artificial intelligence assistant. Your role is to give clear and precise answers."""
8
  formatted_prompt = [{"role": "system", "content": system_prompt}]
9
  for user_prompt, bot_response in history:
@@ -23,7 +23,7 @@ avatar_images=["user.png", "botnb.png"], bubble_full_width=False, show_label=Fal
23
  iface = gr.ChatInterface(fn=generate, chatbot=mychatbot, retry_btn=None, undo_btn=None)
24
 
25
  with gr.Blocks() as demo:
26
- gr.HTML("<center><h1> Hermes-2-Theta-Llama-3-8B - Q8_K_M - GGUF (Quantized) </h1></center>")
27
  iface.render()
28
 
29
  demo.queue().launch(show_api=False, server_name="0.0.0.0")
 
3
 
4
  llm = Llama(model_path="model.gguf", n_ctx=8000, n_threads=2, chat_format="chatml")
5
 
6
+ def generate(message, history, do_sample=True, temperature=0.7,max_tokens=8000, top_p=0.9):
7
  system_prompt = """You are an advanced artificial intelligence assistant. Your role is to give clear and precise answers."""
8
  formatted_prompt = [{"role": "system", "content": system_prompt}]
9
  for user_prompt, bot_response in history:
 
23
  iface = gr.ChatInterface(fn=generate, chatbot=mychatbot, retry_btn=None, undo_btn=None)
24
 
25
  with gr.Blocks() as demo:
26
+ gr.HTML("<center><h1> Hermes-2-Theta-Llama-3-8B </h1></center>")
27
  iface.render()
28
 
29
  demo.queue().launch(show_api=False, server_name="0.0.0.0")