kushagra124 committed on
Commit afd6f20
1 Parent(s): 2a7fc91

adding text box

Files changed (1)
app.py +1 -29
app.py CHANGED
@@ -11,7 +11,7 @@ from skimage.measure import label, regionprops
 
 processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
 model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
-global classes
+classes = list()
 
 def rescale_bbox(bbox,orig_image_shape=(1024,1024),model_shape=352):
     bbox = np.asarray(bbox)/model_shape
@@ -68,34 +68,6 @@ def add_text(text):
     labels = text.split(',')
     return labels
 
-# inputt = gr.Image(type="numpy", label="Input Image for Classification")
-
-# with gr.Blocks(title="Zero Shot Object ddetection using Text Prompts") as demo :
-#     gr.Markdown(
-#         """
-#         <center>
-#         <h1>
-#         The CLIP Model
-#         </h1>
-#         A neural network called CLIP which efficiently learns visual concepts from natural language supervision. CLIP can be applied to any visual classification benchmark by simply providing the names of the visual categories to be recognized, similar to the “zero-shot” capabilities of GPT-2 and GPT-3.
-#         </center>
-#         """
-#     )
-
-#     with gr.Row():
-#         with gr.Column():
-#             inputt = gr.Image(type="numpy", label="Input Image for Classification")
-#             labels = gr.Textbox(label="Enter Label/ labels",placeholder="ex. car,person",scale=4)
-#             button = gr.Button(value="Locate objects")
-#         with gr.Column():
-#             outputs = gr.Image(type="numpy", label="Detected Objects with Selected Category")
-#             # dropdown = gr.Dropdown(labels,label="Select the category",info='Label selection panel')
-
-#     # labels.submit(add_text, inputs=labels)
-#     button.click(fn=shot,inputs=[inputt,labels],api_name='Get labels')
-
-
-# demo.launch()
 iface = gr.Interface(fn=shot,
                      inputs = ["image","text",gr.Dropdown(classes, label="Category Label",info='Select Categories')],
                      outputs="label",