import gradio as gr
import requests
import os
import json
from collections import deque

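# The serverless Inference API requires a bearer token; read it from the Space's environment.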
TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")
if not TOKEN:
    raise ValueError("API token is not set. Please set the HUGGINGFACE_API_TOKEN environment variable.")

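# Rolling conversation memory: only the 10 most recent (user, assistant) pairs are resent each turn.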
memory = deque(maxlen=10)

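# Streaming chat handler. Gradio supplies `history`, but context is rebuilt from the module-level `memory` deque instead.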
def respond(
    message,
    history: list[tuple[str, str]],
    system_message="AI Assistant Role",
    max_tokens=512,
    temperature=0.7,
    top_p=0.95,
):
    # Always answer in the language of the user's message.
    system_prefix = "System: Respond in the same language as the input (English, Korean, Chinese, Japanese, etc.)."
    full_system_message = f"{system_prefix}\n{system_message}"
    # Record the user turn now; the assistant slot is filled in after streaming completes.
    memory.append((message, None))
    messages = [{"role": "system", "content": full_system_message}]
    for val in memory:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
    headers = {
        "Authorization": f"Bearer {TOKEN}",
        "Content-Type": "application/json",
    }
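    # OpenAI-compatible chat-completions payload; the model name and sampling parameters are passed through unchanged.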
    payload = {
        "model": "meta-llama/Meta-Llama-3.1-405B-Instruct",
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "messages": messages,
        "stream": True,  # enable streaming mode
    }
    response = requests.post(
        "https://api-inference.huggingface.co/v1/chat/completions",
        headers=headers,
        json=payload,
        stream=True,
    )
    response.raise_for_status()
    partial_words = ""
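    # Parse the Server-Sent Events stream: each non-empty line arrives as 'data: {json}',
    # and OpenAI-compatible endpoints typically close the stream with 'data: [DONE]'.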
    for chunk in response.iter_lines():
        if chunk:
            chunk_data = chunk.decode("utf-8")
            if chunk_data.startswith("data: "):
                chunk_data = chunk_data[6:]  # strip the "data: " prefix
            if chunk_data.strip() == "[DONE]":
                break
            try:
                response_json = json.loads(chunk_data)
                if "choices" in response_json:
                    delta = response_json["choices"][0].get("delta", {})
                    if "content" in delta:
                        partial_words += delta["content"]
                        yield partial_words
            except json.JSONDecodeError:
                continue
    # Store the completed reply so later turns include assistant context
    # (otherwise the `if val[1]` branch above can never fire).
    if memory:
        memory[-1] = (message, partial_words)

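# Build the Gradio chat UI; the textbox and sliders map onto respond()'s keyword arguments.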
theme = "Nymbo/Nymbo_Theme"

demo = gr.ChatInterface(
    fn=respond,
    theme=theme,
    additional_inputs=[
        gr.Textbox(value="AI Assistant Role", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

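# queue() ensures request queuing, which generator-based streaming relies on; max_threads caps concurrent workers.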
if __name__ == "__main__":
    demo.queue().launch(max_threads=20)