import gradio as gr
from transformers import BlipProcessor, BlipForConditionalGeneration
from PIL import Image
import torch
# Load the processor and model
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
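
# Optional: move the model to a GPU when one is available. This is a sketch,
# assuming a CUDA-capable runtime; the `device` variable is illustrative only
# (inputs would need the same .to(device) treatment inside caption_image):
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model = model.to(device)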
def caption_image(image):
    # Preprocess the input image into model-ready tensors
    inputs = processor(images=image, return_tensors="pt")
    # Generate a caption; inference only, so skip gradient tracking
    with torch.no_grad():
        out = model.generate(**inputs)
    # Decode the generated token IDs into a text caption
    caption = processor.decode(out[0], skip_special_tokens=True)
    return caption
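
# Note: generation defaults can be tuned via standard transformers generate()
# kwargs, e.g. model.generate(**inputs, max_new_tokens=50, num_beams=5),
# trading caption length and quality against latency.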
# Set up the Gradio interface
interface = gr.Interface(
    fn=caption_image,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title="Image Captioning",
    description="Generate captions for images using the BLIP model.",
)
# Launch the interface
interface.launch()
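
# When running locally, launch(share=True) would expose a temporary public
# URL; it is not needed on Hugging Face Spaces, which serves the app itself.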