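"""Gradio chat Space that streams replies from a Groq-hosted Llama 3 model.

A Hugging Face config and LoRA model are also downloaded (hence HF_TOKEN),
but text generation itself is served by the Groq API.
"""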
import os
from groq import Groq
import gradio as gr
from transformers import AutoModel, AutoConfig
hf_token = os.getenv("HF_TOKEN")  # Make sure you set this environment variable

try:
    # config.json is the default config file name, so it does not need to be
    # passed explicitly; `token` replaces the deprecated `use_auth_token`.
    config = AutoConfig.from_pretrained("HusseinEid/llama-3-chatbot", token=hf_token)
    model = AutoModel.from_pretrained("HusseinEid/lora_model", config=config, token=hf_token)
except OSError as e:
    print(f"Error: {e}")
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
system_prompt = {
    "role": "system",
    "content": "You are a useful assistant. You reply with detailed answers.",
}
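# Streaming handler for gr.ChatInterface: rebuilds the message list from the
# chat history on each turn and yields partial text as Groq streams it back.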
async def chat_groq(message, history):
    # Gradio passes history as a list of [user, assistant] pairs.
    messages = [system_prompt]
    for msg in history:
        messages.append({"role": "user", "content": str(msg[0])})
        messages.append({"role": "assistant", "content": str(msg[1])})
    messages.append({"role": "user", "content": str(message)})

    response_content = ''
    stream = client.chat.completions.create(
        # The Groq API expects a model ID string, not the transformers model
        # object loaded above; "llama3-8b-8192" is an assumed Groq-hosted model.
        model="llama3-8b-8192",
        messages=messages,
        max_tokens=1024,
        temperature=1.2,
        stream=True,
    )
    for chunk in stream:
        content = chunk.choices[0].delta.content
        if content:
            response_content += content
            yield response_content
with gr.Blocks(theme=gr.themes.Monochrome(), fill_height=True) as demo:
    gr.ChatInterface(
        chat_groq,
        clear_btn=None,
        undo_btn=None,
        retry_btn=None,
    )
demo.queue()
demo.launch()
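# To run locally: set GROQ_API_KEY and HF_TOKEN in the environment, then
# start the app with `python app.py` (assuming the file is saved as app.py).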