File size: 4,903 Bytes
7ebfeb9
afda258
 
cf05f8b
22ed06b
 
623b4fb
931c795
e82dfb2
7fcb6d2
623b4fb
eba7622
afda258
8875dbc
f385ddd
 
7ebfeb9
8875dbc
f385ddd
 
afda258
dee2758
f385ddd
 
fc6f52f
f385ddd
8ba8a00
 
 
 
4c7d9fb
7ebfeb9
97fceae
 
a4593c9
 
8ba8a00
97fceae
 
 
 
 
9cda1d3
97fceae
eba7622
8ba8a00
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dd7ff01
97fceae
8ba8a00
97fceae
8ba8a00
97fceae
8ba8a00
f385ddd
8ba8a00
 
97fceae
5cebe28
97fceae
8ba8a00
 
 
 
af973ef
8ba8a00
 
97fceae
0a2f651
c9254be
ae697d5
 
97fceae
 
 
8ba8a00
cc88e44
8ba8a00
a4593c9
 
 
97fceae
 
cc88e44
97fceae
8875dbc
623b4fb
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
# Standard library
import pickle  # needed by generate_caption_cnnrnn (was used but never imported)

# Third-party
import difflib
import gradio as gr
import skimage.io as io
from PIL import Image
import PIL.Image

# Local project modules
import clipGPT
import vitGPT
import ViTCoAtt
import cnnrnn
from build_vocab import Vocabulary


# Caption generation functions
def generate_caption_clipgpt(image, max_tokens, temperature):
    """Generate a report for `image` using the CLIP-GPT2 model wrapper.

    Thin pass-through to the project module `clipGPT`; `max_tokens` and
    `temperature` are forwarded unchanged to control decoding.
    """
    return clipGPT.generate_caption_clipgpt(image, max_tokens, temperature)

def generate_caption_vitgpt(image, max_tokens, temperature):
    """Generate a report for `image` using the ViT-GPT2 model wrapper.

    Delegates directly to the project module `vitGPT`, forwarding the
    decoding parameters untouched.
    """
    return vitGPT.generate_caption(image, max_tokens, temperature)

def generate_caption_vitCoAtt(image):
    """Generate a report for `image` using the ViT co-attention model.

    Delegates to the project module's sampler entry point; no decoding
    parameters are exposed by this model.
    """
    return ViTCoAtt.CaptionSampler.main(image)

def generate_caption_cnnrnn(image):
    """Generate a report with the baseline CNN-RNN model.

    NOTE(review): here `image` is an image-ID key into the pre-computed
    feature pickle (see the CNN-RNN branch of `predict`, which resolves
    the selection via `getImageID` first), not a PIL image — confirm.

    Returns the caption string produced by ``cnnrnn.get_result``.
    """
    # BUG FIX: `pickle` was used without ever being imported in this file,
    # which raised NameError at runtime (import added at top of file).
    # Trusted local file, so pickle.load is acceptable here.
    with open('/content/Image_features_ecoder_decoder.pickle', 'rb') as f:
        xnet_features = pickle.load(f)
    # Look up the pre-extracted encoder features for this image ID; the
    # lookup no longer needs the file handle, so it sits outside the `with`.
    features = xnet_features[image]
    return cnnrnn.get_result(features)


# --- UI component construction (module top level) ---
# NOTE(review): these Row/Column containers are created outside a
# gr.Blocks() context; gr.Interface below only consumes the components
# themselves as input specs, so the layout containers presumably have no
# visual effect — confirm against the Gradio version in use.
with gr.Row():
    # Input image component; type="pil" hands predict() a PIL.Image.
    image = gr.Image(label="Upload Chest X-ray", type="pil")   
                     
 
with gr.Row():
    with gr.Column(): # Column for dropdowns and model choice
        # Decoding controls, only consumed by the CLIP-GPT2 / ViT-GPT2 paths.
        max_tokens = gr.Dropdown(list(range(50, 101)), label="Max Tokens", value=75)
        temperature = gr.Slider(0.5, 0.9, step=0.1, label="Temperature", value=0.7) 

    # Model selector; the choice strings must match the branches in predict().
    model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention", "Baseline Model CNN-RNN"], label="Select Model") 
    # NOTE(review): this button is never wired to a callback; gr.Interface
    # supplies its own submit button — confirm this component is needed.
    generate_button = gr.Button("Generate Caption") 
    

# Ground-truth radiology reports keyed by example index (string "0"-"3");
# looked up by getCaption() to display alongside the generated caption.
real_captions = {"0" : "No acute cardiopulmonary abnormality. Low lung volumes. Heart size and mediastinal contour within normal limits. No focal air space consolidation, pneumothorax, or pleural effusion. Mild thoracic spine degenerative change.", 
                 "1":"Left basilar atelectasis and/or infiltrate, with no radiographic evidence of tuberculosis. Heart size upper limits of normal. Small amount of left basilar airspace disease. The right lung is clear. There are no cavitary lesions seen. No pneumothorax. No pleural effusions",
                 "2":"Cardiomegaly and small bilateral pleural effusions. Abnormal pulmonary opacities most suggestive of pulmonary edema, primary differential diagnosis includes infection and aspiration, clinical correlation recommended Moderate-to-marked enlargement of the cardiac silhouette, mediastinal contours appear similar to prior. Mild bilateral posterior sulcus blunting, interstitial and alveolar opacities greatest in the central lungs and bases with indistinct vascular margination.",
                 "3":"Severe cardiomegaly. Limited mediastinal evaluation given body habitus and lordotic projection. Recommend XXXX for further evaluation of mediastinum given T/Spine injury noted on C/Spine imaging. Critical result notification documented through Primordial. Lordotic projection and large body habitus. Limited mediastinal evaluation. Severe cardiomegaly. No visualized pneumothorax. No large effusion or airspace disease. No fracture."}

# Paths to the pre-computed feature keys for the CNN-RNN baseline, keyed by
# the same example index; resolved by getImageID() before calling
# generate_caption_cnnrnn. NOTE(review): only indices "0"-"3" exist while six
# examples are declared below — confirm coverage.
imgIDs = {"0":"/content/drive/MyDrive/cnn-rnn/NLMCXR_png/CXR192_IM-0598_0",
          "1":"/content/drive/MyDrive/cnn-rnn/NLMCXR_png/CXR194_IM-0609_0",
          "2":"/content/drive/MyDrive/cnn-rnn/NLMCXR_png/CXR2637_IM-1122_0",
          "3":"/content/drive/MyDrive/cnn-rnn/NLMCXR_png/CXR1111_IM-0077_0"}


# Output components: model-generated caption and the ground-truth report,
# matching the 2-tuple returned by predict().
caption = gr.Textbox(label="Generated Caption")
real_caption = gr.Textbox(label="Actual Caption")

def predict(img, model_name, max_tokens, temperature, examples):
    """Dispatch to the selected model and pair its output with the
    ground-truth caption for the chosen example.

    Parameters mirror the `inputs` list of the gr.Interface below.
    Returns a 2-tuple (generated_caption, actual_caption) matching
    outputs=[caption, real_caption].
    """
    if model_name == "CLIP-GPT2":
        return generate_caption_clipgpt(img, max_tokens, temperature), getCaption(examples)
    elif model_name == "ViT-GPT2":
        return generate_caption_vitgpt(img, max_tokens, temperature), getCaption(examples)
    elif model_name == "ViT-CoAttention":
        return generate_caption_vitCoAtt(img), getCaption(examples)
    elif model_name == "Baseline Model CNN-RNN":
        # The baseline works from pre-computed features keyed by image ID,
        # so resolve the example selection to an ID before calling it.
        img = getImageID(examples)
        return generate_caption_cnnrnn(img), getCaption(examples)
    else:
        # BUG FIX: this branch previously returned a single string even
        # though the interface declares two outputs, which would make
        # Gradio fail to unpack the result. Return a 2-tuple instead.
        return "Caption generation for this model is not yet implemented.", ""

def getCaption(examples):
    """Look up the ground-truth report for the selected example.

    NOTE(review): indexes `examples[1]` — assumes the examples payload
    carries the caption key at position 1; verify against the Gradio
    examples wiring, since the declared examples are single-item lists.
    """
    actual = real_captions[examples[1]]
    print(actual)
    return actual
    
def getImageID(examples):
    """Resolve the selected example to its pre-computed feature image ID.

    NOTE(review): same `examples[1]` indexing assumption as getCaption —
    confirm the payload shape.
    """
    image_id = imgIDs[examples[1]]
    print(image_id)
    return image_id

# Example inputs shown under the interface: example1.jpg ... example6.jpg.
# NOTE(review): each entry has a single element, yet getCaption/getImageID
# read examples[1]; also only keys "0"-"3" exist in the caption dicts for
# six examples — confirm the examples wiring end to end.
examples = [[f"example{i}.jpg"] for i in range(1,7)]

description= "You can generate captions by uploading an X-Ray and selecting a model of your choice below. Please select the number of Max Tokens and Temperature setting, if you are testing CLIP GPT2 and VIT GPT2 Models"
title = "MedViT: A Vision Transformer-Driven Method for Generating Medical Reports 🏥🤖"

# NOTE(review): `examples` (a plain list) appears inside `inputs`, but
# Interface inputs are expected to be Gradio components — confirm this
# fifth input actually receives the selected example row at runtime.
interface = gr.Interface(
        fn=predict,
        inputs = [image, model_choice, max_tokens, temperature, examples],
        theme="sudeepshouche/minimalist",
        outputs=[caption,real_caption],
        examples = examples,
        title = title,
        description = description
    )


# debug=True surfaces tracebacks in the UI/console while developing.
interface.launch(debug=True)