import gradio as gr
from llama_cpp import Llama
# Load the quantized GGUF model with an 8k context window, two CPU threads,
# and ChatML prompt formatting.
llm = Llama(model_path="model.gguf", n_ctx=8000, n_threads=2, chat_format="chatml")

def generate(message, history, temperature=0.7, max_tokens=4000, top_p=0.9):
    system_prompt = """You are an advanced artificial intelligence assistant. Your role is to give clear and precise answers."""
    # Rebuild the conversation: system prompt, prior turns from history, then the new message.
    formatted_prompt = [{"role": "system", "content": system_prompt}]
    for user_prompt, bot_response in history:
        formatted_prompt.append({"role": "user", "content": user_prompt})
        formatted_prompt.append({"role": "assistant", "content": bot_response})
    formatted_prompt.append({"role": "user", "content": message})
    # Stream the completion and yield the accumulated text so the UI updates as tokens arrive.
    stream_response = llm.create_chat_completion(
        messages=formatted_prompt,
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        stream=True,
    )
    response = ""
    for chunk in stream_response:
        delta = chunk["choices"][0]["delta"]
        if "content" in delta:
            response += delta["content"]
            yield response
mychatbot = gr.Chatbot(
    avatar_images=["user.png", "botnb.png"],
    bubble_full_width=False,
    show_label=False,
    show_copy_button=True,
    likeable=True,
)

iface = gr.ChatInterface(fn=generate, chatbot=mychatbot, retry_btn=None, undo_btn=None)
with gr.Blocks() as demo:
    gr.HTML("<center><h1> Hermes-2-Theta-Llama-3-8B - Q8_K_M - GGUF (Quantized) </h1></center>")
    iface.render()

demo.queue().launch(show_api=False, server_name="0.0.0.0")