import gradio as gr
from transformers import AutoTokenizer, GPTJForCausalLM
# Load the Spanish-language model
model_name = "mrm8488/bertin-gpt-j-6B-ES-v1-8bit"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = GPTJForCausalLM.from_pretrained(model_name)
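# Note (assumption, not part of the original script): this checkpoint is 8-bit quantized, so
# depending on the installed transformers/bitsandbytes versions it may need to be loaded with
# load_in_8bit=True and device_map="auto" instead of a plain from_pretrained() call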
# Define the response function for the chatbot
def chatbot(message):
    inputs = tokenizer(message, return_tensors="pt")
    # Generate up to 150 tokens; unpacking the tokenizer output also passes the attention mask
    outputs = model.generate(**inputs, max_length=150)
    # The decoded text includes the original prompt followed by the model's continuation
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
# Create the Gradio interface
demo = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="Chatbot en Español"
)
# Run the application
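# launch() starts a local Gradio server; passing share=True would also expose a temporary public link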
if __name__ == "__main__":
    demo.launch()