from threading import Thread

import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
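# Note (assumption: this script runs as a Hugging Face Space): `spaces` provides
# the @spaces.GPU decorator used below. On a ZeroGPU Space it requests a GPU for
# the duration of the decorated call; outside ZeroGPU it is effectively a no-op.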

# Header HTML. The Japanese sentence reads: "An unofficial demo of LLM-jp v2."
DESCRIPTION = '''
<div>
<h1 style="text-align: center;">LLM-jp v2</h1>
<p>LLM-jp v2 の非公式デモだよ。 <a href="https://huggingface.co/llm-jp/llm-jp-13b-instruct-full-ac_001_16x-dolly-ichikara_004_001_single-oasst-oasst2-v2.0"><b>llm-jp/llm-jp-13b-instruct-full-ac_001_16x-dolly-ichikara_004_001_single-oasst-oasst2-v2.0</b></a>.</p>
</div>
'''

LICENSE = """
<p/>
"""

# Chat-window placeholder. The Japanese line reads: "Ask me anything."
PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
<h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">LLM-jp v2</h1>
<p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">なんでもきいてね</p>
</div>
"""

css = """
h1 {
  text-align: center;
  display: block;
}

#duplicate-button {
  margin: auto;
  color: white;
  background: #1565c0;
  border-radius: 100vh;
}
"""

MODEL_ID = "llm-jp/llm-jp-13b-instruct-full-ac_001_16x-dolly-ichikara_004_001_single-oasst-oasst2-v2.0"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, device_map="auto", torch_dtype=torch.bfloat16)
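# The 13B checkpoint needs roughly 26 GB of accelerator memory in bfloat16;
# device_map="auto" lets accelerate place (and, if necessary, shard) the
# weights across the available devices.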


@spaces.GPU
def chat_llm_jp_v2(message: str,
                   history: list,
                   temperature: float,
                   max_new_tokens: int):
    """
    Generate a streaming response using the llm-jp-13b-instruct v2.0 model.

    Args:
        message (str): The input message.
        history (list): The conversation history used by ChatInterface.
        temperature (float): The sampling temperature for generating the response.
        max_new_tokens (int): The maximum number of new tokens to generate.

    Yields:
        str: The response generated so far.
    """
|
conversation = [] |
|
conversation.append({"role": "system", "content": "ไปฅไธใฏใใฟในใฏใ่ชฌๆใใๆ็คบใงใใ่ฆๆฑใ้ฉๅใซๆบใใๅฟ็ญใๆธใใชใใใ"}) |
|
for user, assistant in history: |
|
conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}]) |
|
conversation.append({"role": "user", "content": message}) |
|
|
|
input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device) |

    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
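    # model.generate() blocks until generation finishes, so it runs in a worker
    # thread below while this generator iterates the streamer, which yields the
    # decoded text piece by piece. timeout=10.0 makes iteration raise rather
    # than hang if no new text arrives within 10 seconds.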

    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=0.95,
        repetition_penalty=1.1,
    )
    # A temperature of 0 is invalid for sampling, so fall back to greedy decoding.
    if temperature == 0:
        generate_kwargs["do_sample"] = False

    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    # Accumulate the streamed fragments and yield the full text so far;
    # ChatInterface re-renders the pending message with each yielded string.
    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)


chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')

with gr.Blocks(fill_height=True, css=css) as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    gr.ChatInterface(
        fn=chat_llm_jp_v2,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
        additional_inputs=[
            gr.Slider(minimum=0.0,
                      maximum=1.0,
                      step=0.1,
                      value=0.7,
                      label="Temperature",
                      render=False),
            gr.Slider(minimum=128,
                      maximum=4096,
                      step=1,
                      value=512,
                      label="Max new tokens",
                      render=False),
        ],
        examples=[
            # "Explain the theory of relativity so that even an elementary school student can understand it."
            ['小学生にもわかるように相対性理論を教えてください。'],
            # "Explain, step by step, how we can learn about the origin of the universe."
            ['宇宙の起源を知るための方法をステップ・バイ・ステップで教えてください。'],
            # "Write a Python script that finds the prime numbers from 1 to 100."
            ['1から100までの素数を求めるスクリプトをPythonで書いてください。'],
            # "Think of a birthday present for my friend, keeping in mind that the
            # friend is a junior high school student and I am a boy in the same class."
            ['友達にあげる誕生日プレゼントを考えてください。ただし、その友達は中学生で、私は同じクラスの男性であることを考慮してください。'],
            # "Explain why a penguin would make a good king of the jungle."
            ['ペンギンがジャングルの王様であることを正当化するように説明してください。']
        ],
        cache_examples=False,
    )
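    # The two sliders above are passed, in order, to chat_llm_jp_v2 as its
    # temperature and max_new_tokens arguments.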

    gr.Markdown(LICENSE)


if __name__ == "__main__":
    demo.launch()