# Import libraries
import gradio as gr
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer
import spaces

device = "cuda"

# Load the model and tokenizer
model = AutoModel.from_pretrained('openbmb/MiniCPM-Llama3-V-2_5', trust_remote_code=True, torch_dtype=torch.float16)
model = model.to(device=device)
tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-Llama3-V-2_5', trust_remote_code=True)
model.eval()

# Define a function to generate a response
@spaces.GPU
def generate_response(image, question):
    msgs = [{'role': 'user', 'content': question}]
    res = model.chat(
        image=image,
        msgs=msgs,
        tokenizer=tokenizer,
        sampling=True,
        temperature=0.7,
        stream=True
    )
    # Accumulate the streamed chunks into a single response string
    generated_text = ""
    for new_text in res:
        generated_text += new_text
    return generated_text

# Create the footer with links
footer = """
LinkedIn | GitHub | Live demo of my PhD defense<br>
Made with 💖 by Pejman Ebrahimi
""" # Create a Gradio interface using gr.Blocks with gr.Blocks(theme='abidlabs/dracula_revamped') as demo: gr.Markdown("Visual Question Answering - Complete chart and image analysis") gr.Markdown("Input an image and a question related to the image to receive a response.") image_input = gr.Image(type="pil", label="Image") question_input = gr.Textbox(label="Question") output_text = gr.Textbox(label="Response") image_input.change(generate_response, inputs=[image_input, question_input], outputs=output_text) gr.HTML(footer) # Launch the app demo.launch(debug=True)