# BlueChat-v0 / app.py
# Commit 2b4ba4a: adjust top-p and temperature
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "heegyu/bluechat-v0"
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Load the model once at startup; move it to the same device the inputs will use.
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
tokenizer = AutoTokenizer.from_pretrained(model_name)
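
# Optional (an assumption, not part of the original app): on GPU, loading the
# weights in half precision roughly halves memory use:
# model = AutoModelForCausalLM.from_pretrained(
#     model_name, torch_dtype=torch.float16
# ).to(device)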

# Alternative: the same model via the high-level pipeline API
# (requires `from transformers import pipeline`):
# generator = pipeline(
#     "text-generation",
#     model="heegyu/bluechat-v0",
#     device="cuda:0" if torch.cuda.is_available() else "cpu",
# )


def get_message(prompt, min_new_tokens=5):
    prompt = prompt.strip()
    ids = tokenizer(prompt, return_tensors="pt").to(device)
    # Force at least `min_new_tokens` tokens beyond the prompt.
    min_length = ids["input_ids"].shape[1] + min_new_tokens
    output = model.generate(
        **ids,
        no_repeat_ngram_size=3,
        eos_token_id=2,  # 375 = "\n", 2 = </s>; omit to leave generation open-ended
        max_new_tokens=128,
        min_length=min_length,
        do_sample=True,
        top_p=0.95,
        temperature=1.35,
        early_stopping=True,
    )
    output = tokenizer.decode(output.cpu()[0])
    print(output)
    # Return only the newly generated text after the prompt.
    return output[len(prompt):]
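
# Minimal usage sketch (hypothetical prompt; outputs vary because do_sample=True):
# print(get_message("<usr> 안녕하세요\n<bot>", min_new_tokens=8))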


def query(message, chat_history, max_turn=4):
    # Keep only the most recent turns so the prompt stays short.
    if len(chat_history) > max_turn:
        chat_history = chat_history[-max_turn:]
    prompt = []
    for user, bot in chat_history:
        prompt.append(f"<usr> {user}")
        prompt.append(f"<bot> {bot}")
    prompt.append(f"<usr> {message}")
    prompt = "\n".join(prompt) + "\n<bot>"
    response = get_message(prompt, 8)
    return response.strip()
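
# The serialized prompt looks like this (the model completes the final <bot>
# turn until it emits </s>, i.e. token id 2):
#   <usr> earlier user message
#   <bot> earlier bot reply
#   <usr> current user message
#   <bot>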


with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")

    def respond(message, chat_history):
        bot_message = query(message, chat_history)
        chat_history.append((message, bot_message))
        # Clear the textbox and return the updated history to the Chatbot.
        return "", chat_history

    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)
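
# launch() serves the app on a local URL; launch(share=True) would also create
# a temporary public Gradio link.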
demo.launch()