import os
import gradio as gr
from huggingface_hub import InferenceClient
from transformers import AutoTokenizer
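# Gradio demo for EXAONE-3.5-Instruct: the prompt is built locally with the model's
# tokenizer, while generation runs on a dedicated Hugging Face Inference Endpoint
# selected through the model-size radio defined further below.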
HF_TOKEN = os.environ.get("HF_TOKEN", None)
MODEL = "LGAI-EXAONE/EXAONE-3.5-2.4B-Instruct"
MAX_NEW_TOKENS = 4096
DEFAULT_MAX_NEW_TOKENS = 512
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "16384"))
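# Prompts longer than MAX_INPUT_TOKEN_LENGTH are truncated from the left in generate().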
DESCRIPTION = """\
#
EXAONE 3.5: Series of Large Language Models for Real-world Use Cases
##### We hope EXAONE continues to advance Expert AI with its effectiveness and bilingual skills.
👋 For more details, please check EXAONE-3.5 collections, our blog or technical report
#### EXAONE-3.5-32B-Instruct Demo Coming Soon..
"""
EXAMPLES = [
["Explain how wonderful you are"],
["스스로를 자랑해 봐"],
]
BOT_AVATAR = "EXAONE_logo.png"
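# Hidden component holding the URL of the currently selected inference endpoint;
# it is passed to generate() via ADDITIONAL_INPUTS and updated by the model-size radio.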
selected_model = gr.Textbox(value="https://jps6tfdq34ydttbh.us-east4.gcp.endpoints.huggingface.cloud", visible=False)
ADDITIONAL_INPUTS = [
gr.Textbox(
value="You are EXAONE model from LG AI Research, a helpful assistant.",
label="System Prompt",
render=False,
),
gr.Slider(
label="Max new tokens",
minimum=1,
maximum=MAX_NEW_TOKENS,
step=1,
value=DEFAULT_MAX_NEW_TOKENS,
),
gr.Slider(
label="Temperature",
minimum=0.1,
maximum=2.0,
step=0.1,
value=0.7,
),
gr.Slider(
label="Top-p (nucleus sampling)",
minimum=0.05,
maximum=1.0,
step=0.05,
value=0.9,
),
gr.Slider(
label="Top-k",
minimum=1,
maximum=1000,
step=1,
value=1,
),
selected_model
]
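# The tokenizer is only used locally to apply the chat template and count tokens;
# the model itself runs on the remote endpoint.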
tokenizer = AutoTokenizer.from_pretrained(MODEL)
def generate(
    message: str,
    chat_history: list[tuple[str, str]],
    system_prompt: str,
    max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,
    temperature: float = 0.6,
    top_p: float = 0.9,
    top_k: int = 50,
    selected_model: str = "https://jps6tfdq34ydttbh.us-east4.gcp.endpoints.huggingface.cloud",
) -> str:
print(f'model: {selected_model}')
messages = [{"role":"system","content": system_prompt}]
print(f'message: {message}')
print(f'chat_history: {chat_history}')
for user, assistant in chat_history:
messages.extend(
[
{"role": "user", "content": user},
{"role": "assistant", "content": assistant},
]
)
messages.append({"role": "user", "content": message})
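    # Render the conversation with the model's chat template and tokenize it so the
    # input-length limit can be enforced below.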
input_ids = tokenizer.apply_chat_template(
messages,
add_generation_prompt=True,
return_tensors="pt"
)
if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
gr.Warning(f"Trimmed input from messages as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
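    # Decode back to a plain string, since InferenceClient.text_generation expects a text prompt.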
    prompt = tokenizer.decode(input_ids[0])
client = InferenceClient(selected_model, token=HF_TOKEN)
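    # "[|endofturn|]" is EXAONE's end-of-turn marker; passing it as a stop sequence keeps
    # the endpoint from generating past the assistant's reply.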
    gen_kwargs = dict(
        max_new_tokens=max_new_tokens,
        do_sample=True,  # sampling is intended, since temperature/top-p/top-k are exposed in the UI
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        stop=["[|endofturn|]"],
    )
    output = client.text_generation(prompt, **gen_kwargs)
return output
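# Callbacks for the model-size radio: one refreshes the markdown label, the other swaps
# the endpoint URL held by the hidden selected_model component.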
def radio1_change(model_size):
    return f"EXAONE-3.5-{model_size}-Instruct"
def choices_model(model_size):
endpoint_url_dict = {
"2.4B": "https://jps6tfdq34ydttbh.us-east4.gcp.endpoints.huggingface.cloud", # L4
"7.8B": "https://wafz6im0d595g715.us-east-1.aws.endpoints.huggingface.cloud", # L40S
}
return endpoint_url_dict[model_size]
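# ChatInterface wires generate() to the chat UI; the system prompt, sampling sliders, and
# the hidden endpoint selector are forwarded as additional inputs.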
chat_interface = gr.ChatInterface(
fn=generate,
chatbot=gr.Chatbot(
label="EXAONE-3.5-Instruct",
avatar_images=[None, BOT_AVATAR],
layout="bubble",
bubble_full_width=False
),
additional_inputs=ADDITIONAL_INPUTS,
stop_btn=None,
examples=EXAMPLES,
cache_examples=False,
)
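# Page layout: description header, model-size selector, then the chat interface.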
with gr.Blocks(fill_height=True) as demo:
gr.Markdown(DESCRIPTION)
    markdown = gr.Markdown("EXAONE-3.5-2.4B-Instruct")
with gr.Row():
model_size = ["2.4B", "7.8B"]
radio1 = gr.Radio(choices=model_size, label="EXAONE-3.5-Instruct", value=model_size[0])
radio1.change(radio1_change, inputs=radio1, outputs=markdown)
radio1.change(choices_model, inputs=radio1, outputs=selected_model)
chat_interface.render()
if __name__ == "__main__":
demo.queue(max_size=25).launch()