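"""Gradio demo for MedViT: generating medical reports (captions) for chest X-rays.

Supports four captioning back-ends: CLIP-GPT2, ViT-GPT2, ViT-CoAttention, and a
baseline CNN-RNN model that works from precomputed image features.
"""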
import gradio as gr
from PIL import Image
import clipGPT
import vitGPT
import skimage.io as io
import PIL.Image
import difflib
import ViTCoAtt
import cnnrnn
from build_vocab import Vocabulary
import pickle
# Caption generation functions
def generate_caption_clipgpt(image, max_tokens, temperature):
    caption = clipGPT.generate_caption_clipgpt(image, max_tokens, temperature)
    return caption

def generate_caption_vitgpt(image, max_tokens, temperature):
    caption = vitGPT.generate_caption(image, max_tokens, temperature)
    return caption

def generate_caption_vitCoAtt(image):
    caption = ViTCoAtt.CaptionSampler.main(image)
    return caption

def generate_caption_cnnrnn(image):
    # The CNN-RNN baseline looks up precomputed image features (keyed by image
    # path) instead of encoding the uploaded image directly.
    with open('Image_features_ecoder_decoder.pickle', 'rb') as f:
        Xnet_features = pickle.load(f)
    image = Xnet_features[image]
    print(image.shape)
    caption = cnnrnn.get_result(image)
    return caption
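# Gradio UI components shared by all models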
with gr.Row():
    image = gr.Image(label="Upload Chest X-ray", type="pil", height=50, width=50)

with gr.Row():
    with gr.Column():  # Column for dropdowns and model choice
        max_tokens = gr.Dropdown(list(range(50, 101)), label="Max Tokens", value=75)
        temperature = gr.Slider(0.5, 0.9, step=0.1, label="Temperature", value=0.9)
        imgID = gr.Dropdown(["1", "2", "3", "4"], label="Choose the ID of the image selected")
        model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention", "Baseline Model CNN-RNN"], label="Select Model")
    generate_button = gr.Button("Generate Caption")

caption = gr.Textbox(label="Generated Caption")
real_caption = gr.Textbox(label="Actual Caption")
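# Lookup helpers for the bundled example images (IDs 1-4)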
# Ground-truth (actual) reports for the example images; "XXXX" tokens are
# de-identification placeholders carried over from the source reports.
def getCaption(imgID):
    real_captions = {
        "1": "No acute cardiopulmonary abnormality. 2. Stable bilateral emphysematous and lower lobe fibrotic changes. Bilateral emphysematous again noted and lower lobe fibrotic changes. Postsurgical changes of the chest including CABG procedure, stable. Stable valve artifact. There are no focal areas of consolidation. No large pleural effusions. No evidence of pneumothorax. Degenerative changes noted of the visualized thoracic spine. Nodular right lower lobe opacity, XXXX nipple XXXX. Contour abnormality of the posterior aspect of the right 7th rib again noted, stable.",
        "2": "Hypoinflation with bibasilar focal atelectasis. Lung volumes are XXXX. XXXX opacities are present in both lung bases. A hiatal hernia is present. Heart and pulmonary XXXX are normal.",
        "3": "No acute cardiopulmonary process. 2. 9 mm left lower lobe pulmonary nodule not definitively calcified. Recommend comparison with prior images to document stability. If none are available consider nonemergent XXXX chest for further characterization. . Normal heart size and mediastinal contours. The lungs are free of any focal airspace disease. In the left lung base, there is a 9 mm nodule that not definitively calcified. No pneumothorax or pleural effusion. No acute bony abnormalities.",
        "4": "No acute disease. Left lung nodule. Correlate with prior films if available. If none are available, followup imaging in 6 months is suggested. The heart is normal in size. The mediastinum is unremarkable. Small nodule seen in the left upper lung, possibly granuloma. The lungs are otherwise clear.",
        "5": "No Actual Caption",
        "6": "No Actual Caption",
    }
    return real_captions[imgID]
# Paths to the NLMCXR chest X-ray images used by the CNN-RNN baseline
# (local Google Drive paths from the original training environment).
def getImageID(imgID):
    imgIDs = {
        "1": "/content/drive/MyDrive/cnn-rnn/NLMCXR_png/CXR412_IM-2056_0",
        "2": "/content/drive/MyDrive/cnn-rnn/NLMCXR_png/CXR545_IM-2149_0",
        "3": "/content/drive/MyDrive/cnn-rnn/NLMCXR_png/CXR849_IM-2371_0",
        "4": "/content/drive/MyDrive/cnn-rnn/NLMCXR_png/CXR2163_IM-0779_0",
    }
    return imgIDs[imgID]
# Dispatch to the selected model; the CNN-RNN baseline takes the image ID (path)
# rather than the uploaded image because it relies on precomputed features.
def predict(img, model_name, max_tokens, temperature, imgID):
    if model_name == "CLIP-GPT2":
        return generate_caption_clipgpt(img, max_tokens, temperature), getCaption(imgID)
    elif model_name == "ViT-GPT2":
        return generate_caption_vitgpt(img, max_tokens, temperature), getCaption(imgID)
    elif model_name == "ViT-CoAttention":
        return generate_caption_vitCoAtt(img), getCaption(imgID)
    elif model_name == "Baseline Model CNN-RNN":
        img = getImageID(imgID)
        return generate_caption_cnnrnn(img), getCaption(imgID)
    else:
        return "Please select a model", "Please select an image"
# Main call
examples = [[f"example{i}.jpg"] for i in range(1, 7)]
description = "Generate a caption by uploading a chest X-ray and selecting a model of your choice below. If you are testing the CLIP-GPT2 or ViT-GPT2 models, also choose the Max Tokens and Temperature settings."
title = "MedViT: A Vision Transformer-Driven Method for Generating Medical Reports πŸ₯πŸ€–"
interface = gr.Interface(
    fn=predict,
    inputs=[image, model_choice, max_tokens, temperature, imgID],
    theme="sudeepshouche/minimalist",
    outputs=[caption, real_caption],
    examples=examples,
    title=title,
    description=description,
)
interface.launch(debug=True)