user-agent committed on
Commit
973b55e
·
verified ·
1 Parent(s): aff395c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -3
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import requests
2
  from PIL import Image
3
  from io import BytesIO
@@ -6,11 +7,15 @@ from transformers import CLIPProcessor, CLIPModel
6
  import gradio as gr
7
 
8
  # Initialize the model and processor
9
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
10
- model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(device)
11
  processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
12
 
 
13
  def get_embedding(image_or_text):
 
 
 
 
14
  if image_or_text.startswith(('http:', 'https:')):
15
  # Image URL
16
  response = requests.get(image_or_text)
@@ -34,4 +39,4 @@ interface = gr.Interface(fn=get_embedding,
34
  description="Enter an Image URL or text to get embeddings from CLIP.")
35
 
36
  if __name__ == "__main__":
37
- interface.launch(share=True)
 
1
+ import spaces # Import spaces at the top to avoid issues
2
  import requests
3
  from PIL import Image
4
  from io import BytesIO
 
7
  import gradio as gr
8
 
9
  # Initialize the model and processor
10
+ model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
 
11
  processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
12
 
13
+ @spaces.GPU # Use the GPU decorator for the function that requires GPU
14
  def get_embedding(image_or_text):
15
+ # Define device within the function to ensure it uses the GPU when available
16
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
17
+ model.to(device)
18
+
19
  if image_or_text.startswith(('http:', 'https:')):
20
  # Image URL
21
  response = requests.get(image_or_text)
 
39
  description="Enter an Image URL or text to get embeddings from CLIP.")
40
 
41
  if __name__ == "__main__":
42
+ interface.launch(share=True)