kushagra124 committed on
Commit
c043972
1 Parent(s): 7512c11

adding application for CLIP model detection

Files changed (1)
  1. app.py +5 -2
app.py CHANGED
@@ -6,12 +6,13 @@ import numpy as np
 from PIL import Image
 import torch
 import cv2
-from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
+from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation, AutoProcessor
 from skimage.measure import label, regionprops
 
 processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
 model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
 
+
 random_images = []
 images_dir = 'random/images/'
 for idx, images in enumerate(os.listdir(images_dir)):
@@ -61,7 +62,9 @@ def display_images(image,detections,prompt='traffic light'):
 
 def shot(image, labels_text):
     prompts = labels_text.split(',')
-    print(prompts)
+    global classes
+    classes = prompts
+    print(classes)
     detections = detect_using_clip(image, prompts=prompts)
     print(detections)
     return 0
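
For context: shot() hands the comma-split prompts to detect_using_clip(image, prompts=prompts), whose body is not part of this diff. Below is a minimal sketch of the kind of CLIPSeg inference such a helper could run, assuming the standard transformers CLIPSeg API; the function body, the 0.5 threshold, and the dict return format are assumptions for illustration, not the commit's actual code.

import numpy as np
import torch
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

def detect_using_clip(image, prompts):
    # CLIPSeg scores each text prompt against the image independently,
    # so the image is repeated once per prompt in the batch.
    inputs = processor(text=prompts, images=[image] * len(prompts),
                       padding=True, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # logits are low-resolution (352x352) segmentation maps, one per prompt.
    masks = torch.sigmoid(outputs.logits).cpu().numpy()
    # With a single prompt the batch dimension may be squeezed; restore it.
    if masks.ndim == 2:
        masks = masks[None, ...]
    # Threshold each map (0.5 is an assumed value) into a binary mask per prompt.
    return {prompt: (mask > 0.5).astype(np.uint8)
            for prompt, mask in zip(prompts, masks)}

Each binary mask could then be passed through skimage.measure.label/regionprops (already imported in app.py) to turn segmented regions into box-like detections.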