Jyothirmai committed on
Commit
97fceae
β€’
1 Parent(s): 96fc972

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -40
app.py CHANGED
@@ -24,53 +24,59 @@ def generate_caption_vitCoAtt(image):
24
  return caption
25
 
26
 
27
- with gr.Blocks() as demo:
28
-
29
- gr.HTML("<h1 style='text-align: center;'>MedViT: A Vision Transformer-Driven Method for Generating Medical Reports πŸ₯πŸ€–</h1>")
30
- gr.HTML("<p style='text-align: center;'>You can generate captions by uploading an X-Ray and selecting a model of your choice below</p>")
31
-
32
-
33
- with gr.Row():
34
 
35
- image = gr.Image(label="Upload Chest X-ray", type="pil")
36
 
37
- sample_images_gallery = gr.Gallery(value = [
38
- "https://imgur.com/W1pIr9b",
39
- "https://imgur.com/MLJaWnf",
40
- "https://imgur.com/6XymFW1",
41
- "https://imgur.com/zdPjZZ1",
42
- "https://imgur.com/DKUlZbF"], label="Sample Images", columns = 5)
43
-
44
  gr.HTML("<p style='text-align: center;'> Please select the Number of Max Tokens and Temperature setting, if you are testing CLIP GPT2 and VIT GPT2 Models</p>")
45
 
46
 
47
- with gr.Row():
48
-
49
- with gr.Column(): # Column for dropdowns and model choice
50
- max_tokens = gr.Dropdown(list(range(50, 101)), label="Max Tokens", value=75)
51
- temperature = gr.Slider(0.5, 0.9, step=0.1, label="Temperature", value=0.7)
52
-
53
- model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention"], label="Select Model")
54
- generate_button = gr.Button("Generate Caption")
55
-
56
-
57
 
58
- caption = gr.Textbox(label="Generated Caption")
59
 
60
- def predict(img, model_name, max_tokens, temperature):
61
- if model_name == "CLIP-GPT2":
62
- return generate_caption_clipgpt(img, max_tokens, temperature)
63
- elif model_name == "ViT-GPT2":
64
- return generate_caption_vitgpt(img, max_tokens, temperature)
65
- elif model_name == "ViT-CoAttention":
66
- return generate_caption_vitCoAtt(img)
67
- else:
68
- return "Caption generation for this model is not yet implemented."
69
-
70
 
71
- # Event handlers
72
- generate_button.click(predict, [image, model_choice, max_tokens, temperature], caption)
73
- sample_images_gallery.select(predict, [sample_images_gallery, model_choice, max_tokens, temperature], caption)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
 
75
 
76
- demo.launch()
 
24
  return caption
25
 
26
 
27
+ gr.HTML("<h1 style='text-align: center;'>MedViT: A Vision Transformer-Driven Method for Generating Medical Reports πŸ₯πŸ€–</h1>")
28
+ gr.HTML("<p style='text-align: center;'>You can generate captions by uploading an X-Ray and selecting a model of your choice below</p>")
29
+
30
+
31
+ with gr.Row():
 
 
32
 
33
+ image = gr.Image(label="Upload Chest X-ray", type="pil")
34
 
35
+
 
 
 
 
 
 
36
  gr.HTML("<p style='text-align: center;'> Please select the Number of Max Tokens and Temperature setting, if you are testing CLIP GPT2 and VIT GPT2 Models</p>")
37
 
38
 
39
+ with gr.Row():
40
+
41
+ with gr.Column(): # Column for dropdowns and model choice
42
+ max_tokens = gr.Dropdown(list(range(50, 101)), label="Max Tokens", value=75)
43
+ temperature = gr.Slider(0.5, 0.9, step=0.1, label="Temperature", value=0.7)
44
+
45
+ model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention"], label="Select Model")
46
+ generate_button = gr.Button("Generate Caption")
 
 
47
 
 
48
 
49
+ caption = gr.Textbox(label="Generated Caption")
 
 
 
 
 
 
 
 
 
50
 
51
+ def predict(img, model_name, max_tokens, temperature):
52
+ if model_name == "CLIP-GPT2":
53
+ return generate_caption_clipgpt(img, max_tokens, temperature)
54
+ elif model_name == "ViT-GPT2":
55
+ return generate_caption_vitgpt(img, max_tokens, temperature)
56
+ elif model_name == "ViT-CoAttention":
57
+ return generate_caption_vitCoAtt(img)
58
+ else:
59
+ return "Caption generation for this model is not yet implemented."
60
+
61
+
62
+ sample_images = [
63
+ 'https://imgur.com/W1pIr9b',
64
+ 'https://imgur.com/MLJaWnf',
65
+ 'https://imgur.com/6XymFW1',
66
+ 'https://imgur.com/zdPjZZ1',
67
+ 'https://imgur.com/DKUlZbF'
68
+ ]
69
+
70
+ # examples = [f"example{i}.jpg" for i in range(1,7)]
71
+
72
+ interface = gr.Interface(
73
+ fn=predict,
74
+ inputs = [image, model_choice, max_tokens, temperature],
75
+ theme="gradio/monochrome",
76
+ outputs=caption,
77
+ examples = sample_images,
78
+ )
79
+
80
+ interface.launch(debug=True)
81
 
82