import gradio as gr
import torch
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

# Example with BLIP (replace with your fine-tuned model)
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")


def caption_image(image):
    """Generate a text caption for a PIL image."""
    if image is None:
        return "No image provided"
    # Preprocess the image into model-ready tensors (pixel values).
    inputs = processor(images=image, return_tensors="pt")
    # Inference only: disable gradient tracking to save memory.
    with torch.no_grad():
        out = model.generate(**inputs)
    # Decode the generated token IDs back into a readable caption.
    caption = processor.decode(out[0], skip_special_tokens=True)
    return caption


demo = gr.Interface(
    fn=caption_image,
    inputs=gr.Image(type="pil"),  # Gradio hands the function a PIL.Image
    outputs="text",
    title="Custom UI Action Description",
)

if __name__ == "__main__":
    demo.launch()
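
# Sketch: swapping in a fine-tuned checkpoint, as the comment above suggests.
# "./blip-ui-actions-finetuned" is a hypothetical path; any directory produced
# by model.save_pretrained() / processor.save_pretrained(), or a Hub repo ID,
# works the same way. GPU placement is optional but speeds up generation:
#
# processor = BlipProcessor.from_pretrained("./blip-ui-actions-finetuned")
# model = BlipForConditionalGeneration.from_pretrained("./blip-ui-actions-finetuned")
# if torch.cuda.is_available():
#     model = model.to("cuda")
#     # then also move inputs before generating: inputs = inputs.to("cuda")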