# ElleresSistemas / app.py
# Author: Elleres — commit 7096f2c (verified), 935 bytes
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
# Hugging Face model id to load (Mixtral 8x7B base model).
MODEL_NAME = "mistralai/Mixtral-8x7B-v0.1"
# Load the tokenizer and the Mixtral model.
# NOTE(review): device_map="auto" requires the `accelerate` package and
# shards the weights across available devices; float16 weights for
# Mixtral-8x7B need on the order of ~90 GB — confirm the host hardware
# can actually hold them.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME, torch_dtype=torch.float16, device_map="auto"
)
# Função para processar as mensagens do usuário
# Process a user message and return the model's reply.
def chat(input_text):
    """Generate a model response for *input_text*.

    Parameters
    ----------
    input_text : str
        The raw user prompt typed into the Gradio textbox.

    Returns
    -------
    str
        The decoded generation (note: the full sequence is decoded, so
        the prompt text is included at the start of the reply).
    """
    # Bug fix: move the inputs to wherever accelerate placed the model
    # instead of hard-coding "cuda" — with device_map="auto" the input
    # embeddings may not be on cuda:0, and .to("cuda") crashes outright
    # on CPU-only hosts.
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=200)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
# Assemble the Gradio UI configuration that wraps chat(), then serve it.
interface_kwargs = dict(
    fn=chat,
    inputs="text",
    outputs="text",
    title="Assistente Virtual Mixtral",
    description="Converse com o assistente baseado no modelo Mixtral 8x7B",
)
iface = gr.Interface(**interface_kwargs)
# Start the chatbot web server.
iface.launch()