ovi054 committed
Commit f1e90c7 · verified · 1 Parent(s): 08430c8

Update app.py

Files changed (1)
  1. app.py +9 -9
app.py CHANGED
@@ -10,22 +10,22 @@ from utils.florence import load_florence_model, run_florence_inference, \
     FLORENCE_OPEN_VOCABULARY_DETECTION_TASK
 from utils.sam import load_sam_image_model, run_sam_inference
 
-DEVICE = torch.device("cuda")
-# DEVICE = torch.device("cpu")
+# DEVICE = torch.device("cuda")
+DEVICE = torch.device("cpu")
 
-torch.autocast(device_type="cuda", dtype=torch.bfloat16).__enter__()
-if torch.cuda.get_device_properties(0).major >= 8:
-    torch.backends.cuda.matmul.allow_tf32 = True
-    torch.backends.cudnn.allow_tf32 = True
+# torch.autocast(device_type="cuda", dtype=torch.bfloat16).__enter__()
+# if torch.cuda.get_device_properties(0).major >= 8:
+#     torch.backends.cuda.matmul.allow_tf32 = True
+#     torch.backends.cudnn.allow_tf32 = True
 
 
 FLORENCE_MODEL, FLORENCE_PROCESSOR = load_florence_model(device=DEVICE)
 SAM_IMAGE_MODEL = load_sam_image_model(device=DEVICE)
 
 
-@spaces.GPU(duration=20)
-@torch.inference_mode()
-@torch.autocast(device_type="cuda", dtype=torch.bfloat16)
+# @spaces.GPU(duration=20)
+# @torch.inference_mode()
+# @torch.autocast(device_type="cuda", dtype=torch.bfloat16)
 def process_image(image_input, text_input) -> Optional[Image.Image]:
     if not image_input:
         gr.Info("Please upload an image.")
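
For context, this edit pins the Space to CPU by hard-coding the device and commenting out the GPU-only setup (bfloat16 autocast, TF32 flags, and the @spaces.GPU decorator). A minimal sketch of a device-agnostic alternative, not part of this commit and assuming only standard PyTorch APIs, would select the device at runtime instead:

# Hypothetical alternative (not in this commit): choose the device at runtime
# and apply the CUDA-specific settings only when a GPU is actually present.
import torch

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

if DEVICE.type == "cuda":
    # Enable bfloat16 autocast for subsequent ops in this process.
    torch.autocast(device_type="cuda", dtype=torch.bfloat16).__enter__()
    # TF32 matmuls are only available on Ampere (compute capability 8.x) or newer.
    if torch.cuda.get_device_properties(0).major >= 8:
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True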