# Import libraries
import gradio as gr
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer
import spaces
# Device used for inference
device = "cuda"

# Load the model and tokenizer
model = AutoModel.from_pretrained('openbmb/MiniCPM-Llama3-V-2_5', trust_remote_code=True, torch_dtype=torch.float16)
model = model.to(device=device)
tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-Llama3-V-2_5', trust_remote_code=True)
model.eval()
# Define a function to generate a response
@spaces.GPU
def generate_response(image, question):
    msgs = [{'role': 'user', 'content': question}]
    # Stream the answer from the model's chat() helper
    res = model.chat(
        image=image,
        msgs=msgs,
        tokenizer=tokenizer,
        sampling=True,
        temperature=0.7,
        stream=True
    )
    # Collect the streamed chunks into the full response
    generated_text = ""
    for new_text in res:
        generated_text += new_text
    return generated_text
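
# Hypothetical local sanity check (not part of the hosted Space UI; "chart.png"
# and the question below are placeholder values for illustration only):
#   img = Image.open("chart.png")
#   print(generate_response(img, "What trend does this chart show?"))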
# Create a Gradio interface
iface = gr.Interface(
    fn=generate_response,
    inputs=[gr.Image(type="pil"), "text"],
    outputs="text",
    title="Visual Question Answering - Financial charts analysis",
    description="Input an image and a question related to the image to receive a response.",
    theme='abidlabs/dracula_revamped'
)
# Launch the app
iface.launch(debug=True)