karpurna2 committed
Commit 97bbc07 · 1 Parent(s): 8b18427

updated with spaces
Files changed (6)
  1. app.py +2 -3
  2. requirements.txt +1 -0
  3. utils/Caption.py +2 -1
  4. utils/Emotions.py +2 -1
  5. utils/test.py +2 -1
  6. utils/utils.py +2 -1
app.py CHANGED
@@ -4,7 +4,6 @@ import unicodedata
 from utils.utils import get_label
 
 
-
 def sanitize_feedback(feedback):
     """
     Convert emojis or other non-text characters in feedback to a text representation.
@@ -86,7 +85,7 @@ with gr.Blocks() as demo:
     feedback_button.click(fn=submit_feedback, inputs=feedback, outputs=None)
 
     # Thank you message section, initially hidden
-    thank_you_message = gr.Markdown("", visible=False)
+    thank_you_message = gr.Markdown("Thank you for your feedback!", visible=False)
 
     # Display thank you message when feedback is submitted
     feedback_submitted.change(
@@ -104,4 +103,4 @@ with gr.Blocks() as demo:
 
 # Launch the interface
 if __name__ == "__main__":
-    demo.launch(share=False)
+    demo.launch(share=True)
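As an aside, the two behavioral changes in app.py are that the thank-you banner now carries its text up front (it stays hidden until feedback is submitted) and that the app launches with share=True. Below is a minimal, self-contained sketch of the hidden-Markdown pattern, assuming only a stock Gradio install; it collapses the app's feedback_submitted state hop into a direct click handler, and submit_feedback here is a hypothetical stand-in for the real one:

```python
import gradio as gr

def submit_feedback(feedback):
    # Hypothetical handler: the real app stores the feedback elsewhere.
    print(f"Feedback received: {feedback}")
    # Returning an update with visible=True reveals the hidden Markdown.
    return gr.update(visible=True)

with gr.Blocks() as demo:
    feedback = gr.Textbox(label="Feedback")
    feedback_button = gr.Button("Submit")
    # Starts hidden, like thank_you_message in the diff above.
    thank_you_message = gr.Markdown("Thank you for your feedback!", visible=False)
    feedback_button.click(fn=submit_feedback, inputs=feedback, outputs=thank_you_message)

if __name__ == "__main__":
    demo.launch(share=True)
```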
requirements.txt CHANGED
@@ -11,4 +11,5 @@ opencv-python
 opencv-contrib-python
 openai-clip
 gradio==5.8.0
+spaces
 matplotlib
utils/Caption.py CHANGED
@@ -1,7 +1,8 @@
 import torch
 from transformers import AutoModel, AutoTokenizer
+import spaces
 
-
+@spaces.GPU
 def get_caption(image):
     print(image)
     model = AutoModel.from_pretrained('openbmb/MiniCPM-Llama3-V-2_5', trust_remote_code=True, torch_dtype=torch.float16)
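The import spaces plus @spaces.GPU pair is Hugging Face's ZeroGPU API, provided by the spaces package that the requirements.txt change above adds: on a ZeroGPU Space, a GPU is attached only while a decorated function runs and is released when it returns. A minimal sketch of the pattern, assuming it runs on a ZeroGPU Space:

```python
import spaces
import torch

@spaces.GPU  # a GPU is allocated for this call and freed when it returns
def describe_device():
    # CUDA is only guaranteed to be available inside decorated functions.
    return torch.cuda.get_device_name(0)
```

Longer jobs can reserve more time via the decorator's duration argument, e.g. @spaces.GPU(duration=120).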
utils/Emotions.py CHANGED
@@ -7,8 +7,9 @@ from utils.ImageOnly import Decoder4
 from utils.CustomDataset import CustomDataset
 from utils.test import test
 from transformers import RobertaTokenizer
+import spaces
 
-
+@spaces.GPU
 def get_emotions(image, text):
     tags = ['Excitement', 'Sadness', 'Amusement', 'Disgust', 'Awe', 'Contentment', 'Fear', 'Anger']
     max_len = 128
utils/test.py CHANGED
@@ -1,7 +1,8 @@
 import numpy as np
 import torch
+import spaces
 
-
+@spaces.GPU
 def test(model, model2, decoder, device, test_loader):
     model = model.to(device)
     decoder = decoder.to(device)
utils/utils.py CHANGED
@@ -2,8 +2,9 @@ import matplotlib.pyplot as plt
 import numpy as np
 from utils.Caption import get_caption
 from utils.Emotions import get_emotions
+import spaces
 
-
+@spaces.GPU
 def get_label(image):
     caption = get_caption(image)
 
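Taken together, get_label is the decorated entry point that app.py imports, and it in turn calls the equally decorated get_caption and get_emotions. A short usage sketch from the caller's side; example.jpg is a hypothetical input file:

```python
from PIL import Image
from utils.utils import get_label

# On a ZeroGPU Space, calling the @spaces.GPU-decorated get_label
# attaches a GPU for the duration of the call.
image = Image.open("example.jpg")
print(get_label(image))
```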