merve HF staff committed on
Commit
554867f
1 Parent(s): 3a4430e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -12,8 +12,10 @@ import numpy as np
12
  from PIL import Image
13
  import spaces
14
 
 
 
15
  processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16-ensemble")
16
- model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble").to("cuda")
17
 
18
  BOUNDING_BOX_ANNOTATOR = sv.BoundingBoxAnnotator()
19
  MASK_ANNOTATOR = sv.MaskAnnotator()
@@ -76,7 +78,7 @@ def process_video(
76
 
77
  @spaces.GPU
78
  def query(image, texts):
79
- inputs = processor(text=texts, images=image, return_tensors="pt").to("cuda")
80
  with torch.no_grad():
81
  outputs = model(**inputs)
82
  target_sizes = torch.Tensor([image.shape[:-1]])
 
12
  from PIL import Image
13
  import spaces
14
 
15
+
16
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
17
  processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16-ensemble")
18
+ model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble").to(device)
19
 
20
  BOUNDING_BOX_ANNOTATOR = sv.BoundingBoxAnnotator()
21
  MASK_ANNOTATOR = sv.MaskAnnotator()
 
78
 
79
  @spaces.GPU
80
  def query(image, texts):
81
+ inputs = processor(text=texts, images=image, return_tensors="pt").to(device)
82
  with torch.no_grad():
83
  outputs = model(**inputs)
84
  target_sizes = torch.Tensor([image.shape[:-1]])