# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr
import torch

# Optional: authenticate with the Hugging Face Hub (not needed for public models).
# from huggingface_hub import login
# login(token="token", add_to_git_credential=True)
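# In a deployed Space, the token would normally come from a repository secret rather
# than being hard-coded. A minimal sketch, assuming a secret named HF_TOKEN (the
# secret name is an assumption, not part of the original app):
# import os
# login(token=os.environ["HF_TOKEN"])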
model_name = "microsoft/DialoGPT-large"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# DialoGPT has no dedicated padding token, so reuse the end-of-sequence token for padding.
model.generation_config.pad_token_id = model.generation_config.eos_token_id

# Running token history of the whole conversation, shared across calls.
chat_history_ids = None
def generate_response(user_input, history=None):
    global chat_history_ids  # to update the global conversation history

    # Encode the new user message, terminated with the end-of-sequence token.
    new_user_input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors='pt')

    # Append the new message to the running token history, if one exists.
    if history and chat_history_ids is not None:
        bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1)
    else:
        bot_input_ids = new_user_input_ids

    # Generate a continuation and keep the full sequence as the new history.
    chat_history_ids = model.generate(bot_input_ids, max_length=10000, pad_token_id=tokenizer.eos_token_id)

    # Decode only the newly generated tokens (everything after the prompt).
    response = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)

    # For debugging:
    # print("user", new_user_input_ids)
    # print("bot", chat_history_ids[:, bot_input_ids.shape[-1]:][0])
    # print(chat_history_ids)
    return response
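# DialoGPT is GPT-2-based, so its context window is 1024 tokens, while the history
# above grows without bound (and max_length=10000 exceeds that window). A minimal,
# hypothetical helper (not part of the original app) that could be applied to
# bot_input_ids before generation to keep the prompt inside the window:
def truncate_to_context(input_ids, max_tokens=1024):
    # Keep only the most recent max_tokens tokens of the conversation.
    return input_ids[:, -max_tokens:]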
chatbot_interface = gr.ChatInterface(fn=generate_response)
demo = gr.TabbedInterface([chatbot_interface], ["chatbot"])
demo.launch()
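# For a quick smoke test without the web UI, the handler can be called directly
# before launching (the prompt below is illustrative only):
# print(generate_response("Hello, how are you?"))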