import gradio as gr
from PIL import Image
import clipGPT
import vitGPT
import skimage.io as io
import PIL.Image
import difflib
import tester
from build_vocab import Vocabulary
# Caption generation functions
def generate_caption_clipgpt(image):
    caption = clipGPT.generate_caption_clipgpt(image)
    return caption


def generate_caption_vitgpt(image):
    caption = vitGPT.generate_caption(image)
    return caption


def generate_caption_vitCoAtt(image):
    caption = tester.CaptionSampler.main(image)
    return caption

with gr.Blocks() as demo:
    gr.HTML("<h1 style='text-align: center;'>MedViT: A Vision Transformer-Driven Method for Generating Medical Reports 🏥🤖</h1>")
    gr.HTML("<p style='text-align: center;'>You can generate captions by uploading an X-Ray and selecting a model of your choice below.</p>")

    with gr.Row():
        model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention"], label="Select Model")

    with gr.Row():
        sample_images = [
            "https://upload.wikimedia.org/wikipedia/commons/0/09/TheCheethcat.jpg",
            "CXR192_IM-0598-1001.png",
            "CXR193_IM-0601-1001.png",
            "CXR194_IM-0609-1001.png",
            "CXR195_IM-0618-1001.png"
        ]
        image = gr.Image(label="Upload Chest X-ray", type="pil")
        sample_images_gallery = gr.Gallery(value=sample_images, label="Sample Images")

    with gr.Row():
        generate_button = gr.Button("Generate Caption")
        caption = gr.Textbox(label="Generated Caption")

    def predict(img, model_name):
        if model_name == "CLIP-GPT2":
            return generate_caption_clipgpt(img)
        elif model_name == "ViT-GPT2":
            return generate_caption_vitgpt(img)
        elif model_name == "ViT-CoAttention":
            return generate_caption_vitCoAtt(img)
        else:
            return "Caption generation for this model is not yet implemented."

    # Event handlers
    generate_button.click(predict, [image, model_choice], caption)  # Trigger prediction on button click
    sample_images_gallery.change(predict, [sample_images_gallery, model_choice], caption)  # Handle sample images
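    # A hedged sketch, not part of the original app: the .change handler above passes the
    # whole gallery list to predict(), which expects a single image. Assuming a Gradio
    # version that supports Gallery.select() with gr.SelectData, the clicked thumbnail
    # could instead be routed by index. predict_from_gallery is a hypothetical helper,
    # and the models may still require loading the selected path/URL as a PIL image.
    #
    # def predict_from_gallery(model_name, evt: gr.SelectData):
    #     return predict(sample_images[evt.index], model_name)
    #
    # sample_images_gallery.select(predict_from_gallery, [model_choice], caption)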
demo.launch()