Jyothirmai committed
Commit 8875dbc • 1 Parent(s): eba7622

Update app.py

Files changed (1)
  1. app.py  +39 -33
app.py CHANGED
@@ -11,26 +11,25 @@ from build_vocab import Vocabulary
 
 
 # Caption generation functions
-def generate_caption_clipgpt(image):
-    caption = clipGPT.generate_caption_clipgpt(image)
-    return caption
+def generate_caption_clipgpt(image, max_tokens, temperature):
+    caption = clipGPT.generate_caption_clipgpt(image, max_tokens, temperature)
+    return caption
 
-def generate_caption_vitgpt(image):
-    caption = vitGPT.generate_caption(image)
-    return caption
+def generate_caption_vitgpt(image, max_tokens, temperature):
+    caption = vitGPT.generate_caption(image, max_tokens, temperature)
+    return caption
 
 def generate_caption_vitCoAtt(image):
-    caption = ViTCoAtt.CaptionSampler.main(image)
-    return caption
+    caption = ViTCoAtt.CaptionSampler.main(image)
+    return caption
 
 
 with gr.Blocks() as demo:
-
-
+
     gr.HTML("<h1 style='text-align: center;'>MedViT: A Vision Transformer-Driven Method for Generating Medical Reports 🏥🤖</h1>")
     gr.HTML("<p style='text-align: center;'>You can generate captions by uploading an X-Ray and selecting a model of your choice below</p>")
-
-
+
+
     with gr.Row():
         sample_images = [
            'https://imgur.com/W1pIr9b',
@@ -38,36 +37,43 @@ with gr.Blocks() as demo:
            'https://imgur.com/6XymFW1',
            'https://imgur.com/zdPjZZ1',
            'https://imgur.com/DKUlZbF'
-        ]
-
-
-        image = gr.Image(label="Upload Chest X-ray", type="pil")
+        ]
 
+
+        image = gr.Image(label="Upload Chest X-ray", type="pil")
+
         sample_images_gallery = gr.Gallery(value = sample_images,label="Sample Images")
-
+
+    gr.HTML("<p style='text-align: center;'> Please select the Number of Max Tokens and Temperature setting, if you are testing CLIP GPT2 and VIT GPT2 Models</p>")
+
+
     with gr.Row():
-        model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention"], label="Select Model")
-
+
+        with gr.Column(): # Column for dropdowns and model choice
+            max_tokens = gr.Dropdown(list(range(50, 101)), label="Max Tokens", value=75)
+            temperature = gr.Slider(0.5, 0.9, step=0.1, label="Temperature", value=0.7)
+
+        model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention"], label="Select Model")
         generate_button = gr.Button("Generate Caption")
+
 
 
-
     caption = gr.Textbox(label="Generated Caption")
-
-    def predict(img, model_name):
+
+    def predict(img, model_name, max_tokens, temperature):
         if model_name == "CLIP-GPT2":
-            return generate_caption_clipgpt(img)
+            return generate_caption_clipgpt(img, max_tokens, temperature)
         elif model_name == "ViT-GPT2":
-            return generate_caption_vitgpt(img)
+            return generate_caption_vitgpt(img, max_tokens, temperature)
         elif model_name == "ViT-CoAttention":
-            return generate_caption_vitCoAtt(img)
+            return generate_caption_vitCoAtt(img)
         else:
-            return "Caption generation for this model is not yet implemented."
-
-
-    # Event handlers
-    generate_button.click(predict, [image, model_choice], caption) # Trigger prediction on button click
-    sample_images_gallery.change(predict, [sample_images_gallery, model_choice], caption) # Handle sample images
-
+            return "Caption generation for this model is not yet implemented."
+
+
+    # Event handlers
+    generate_button.click(predict, [image, model_choice, max_tokens, temperature], caption)
+    sample_images_gallery.click(predict, [sample_images_gallery, model_choice, max_tokens, temperature], caption)
+
 
-demo.launch()
+demo.launch()
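
For reference, a minimal, self-contained sketch of the control flow this commit introduces: Button.click collects the current values of every component listed in its inputs, passes them positionally to the handler, and writes the return value into the output Textbox. The dummy_caption function below is a stand-in for the repo's clipGPT / vitGPT / ViTCoAtt models, not actual project code.

import gradio as gr

def dummy_caption(image, model_name, max_tokens, temperature):
    # Placeholder for the real model calls; only echoes the chosen settings.
    return f"{model_name}: caption with max_tokens={max_tokens}, temperature={temperature}"

with gr.Blocks() as demo:
    image = gr.Image(label="Upload Chest X-ray", type="pil")
    with gr.Row():
        with gr.Column():
            max_tokens = gr.Dropdown(list(range(50, 101)), label="Max Tokens", value=75)
            temperature = gr.Slider(0.5, 0.9, step=0.1, label="Temperature", value=0.7)
        model_choice = gr.Radio(["CLIP-GPT2", "ViT-GPT2", "ViT-CoAttention"], label="Select Model")
        generate_button = gr.Button("Generate Caption")
    caption = gr.Textbox(label="Generated Caption")

    # Same event signature as in the commit: inputs arrive in list order.
    generate_button.click(dummy_caption, inputs=[image, model_choice, max_tokens, temperature], outputs=caption)

demo.launch()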
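
One caveat on the gallery handler: as wired above, sample_images_gallery.click lists the gallery component itself as an input, so predict receives the gallery's whole value rather than the thumbnail the user clicked. A commonly used alternative in Gradio is the Gallery.select event, whose gr.SelectData payload carries the index of the clicked item. The sketch below shows that pattern; the on_gallery_select helper is hypothetical, not from the repo, it would sit inside the with gr.Blocks() block next to the existing handlers, and it assumes the caption functions can accept the image path/URL stored in sample_images.

    def on_gallery_select(evt: gr.SelectData, model_name, max_tokens, temperature):
        # evt.index is the position of the clicked thumbnail; look it up in the
        # same sample_images list that populated the gallery.
        selected_image = sample_images[evt.index]
        return predict(selected_image, model_name, max_tokens, temperature)

    sample_images_gallery.select(
        on_gallery_select,
        inputs=[model_choice, max_tokens, temperature],
        outputs=caption,
    )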