Spaces: Running on T4

xinghaochen committed · Commit 2cf356e · 1 parent: 297b15c

Rename app_new.py to app.py

app_new.py → app.py (RENAMED, +16 −43)
@@ -1,4 +1,4 @@
-# Code credit: [
+# Code credit: [EfficientSAM Demo](https://huggingface.co/spaces/yunyangx/EfficientSAM).
 
 import copy
 import os # noqa
@@ -15,11 +15,12 @@ from tinysam import sam_model_registry, SamPredictor
 from huggingface_hub import snapshot_download
 
 
-snapshot_download("merve/tinysam", local_dir="tinysam")
+#snapshot_download("merve/tinysam", local_dir="tinysam")
 
 model_type = "vit_t"
-sam = sam_model_registry[model_type](checkpoint="./tinysam
+sam = sam_model_registry[model_type](checkpoint="./tinysam.pth")
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+print(device)
 sam.to(device=device)
 sam.eval()
 predictor = SamPredictor(sam)
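For anyone reproducing this outside the Space, here is a minimal sketch of how the now-disabled download step and the new local checkpoint path could be reconciled. It is not part of the commit; the assumption that the "merve/tinysam" repo hosts a file literally named "tinysam.pth" is mine.

# Hedged sketch, not the commit's code: fetch the checkpoint explicitly
# instead of expecting a pre-existing ./tinysam.pth next to app.py.
import torch
from huggingface_hub import hf_hub_download
from tinysam import sam_model_registry, SamPredictor

# hf_hub_download returns the local path of the cached file
checkpoint_path = hf_hub_download(repo_id="merve/tinysam", filename="tinysam.pth")
sam = sam_model_registry["vit_t"](checkpoint=checkpoint_path)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
sam.to(device=device)
sam.eval()
predictor = SamPredictor(sam)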
@@ -101,51 +102,23 @@ def segment_with_boxs(
     nd_image = np.array(image)
     img_tensor = ToTensor()(nd_image)
 
-    coord_np = np.array(session_state['coord_list'])
-    label_np = np.array(session_state['label_list'])
+    #coord_np = np.array(session_state['coord_list'])
+    #label_np = np.array(session_state['label_list'])
+    print(scaled_points, scaled_point_label)
+    predictor.set_image(np.array(image))
+    input_box = scaled_points.reshape([4])
+    print('box', input_box)
     masks, scores, logits = predictor.predict(
-        point_coords=
-        point_labels=
+        point_coords=None, #scaled_points,
+        point_labels=None, #scaled_point_label,
+        box=input_box[None, :]
     )
     print(f'scores: {scores}')
     area = masks.sum(axis=(1, 2))
     print(f'area: {area}')
     annotations = np.expand_dims(masks[scores.argmax()], axis=0)
 
-    print(
-    pts_sampled = torch.reshape(torch.tensor(scaled_points), [1, 1, -1, 2])
-    pts_sampled = pts_sampled[:, :, :2, :]
-    pts_labels = torch.reshape(torch.tensor([2, 3]), [1, 1, 2])
-
-    predicted_logits, predicted_iou = model(
-        img_tensor[None, ...].to(device),
-        pts_sampled.to(device),
-        pts_labels.to(device),
-    )
-    predicted_logits = predicted_logits.cpu()
-    all_masks = torch.ge(torch.sigmoid(predicted_logits[0, 0, :, :, :]), 0.5).numpy()
-    predicted_iou = predicted_iou[0, 0, ...].cpu().detach().numpy()
-
-    max_predicted_iou = -1
-    selected_mask_using_predicted_iou = None
-    selected_predicted_iou = None
-
-    for m in range(all_masks.shape[0]):
-        curr_predicted_iou = predicted_iou[m]
-        if (
-            curr_predicted_iou > max_predicted_iou
-            or selected_mask_using_predicted_iou is None
-        ):
-            max_predicted_iou = curr_predicted_iou
-            selected_mask_using_predicted_iou = all_masks[m:m+1]
-            selected_predicted_iou = predicted_iou[m:m+1]
-
-    results = format_results(selected_mask_using_predicted_iou, selected_predicted_iou, predicted_logits, 0)
-
-    annotations = results[0]["segmentation"]
-    annotations = np.array([annotations])
-    print(scaled_points.shape)
+    print(annotations)
     fig = fast_process(
         annotations=annotations,
         image=image,
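The substance of this hunk is the prompt change: the broken point-prompt call (and the dead EfficientSAM-style selection loop below it) is replaced by a single box prompt fed through the SAM-style predictor API. A standalone sketch of that flow, reusing the predictor built above; the image path and box coordinates are illustrative, not taken from the Space:

# Hedged sketch of the box-prompt path the diff adopts; assumes tinysam's
# SamPredictor mirrors segment-anything's predict() signature.
import numpy as np
from PIL import Image

image = Image.open("example.jpg").convert("RGB")  # hypothetical input image
predictor.set_image(np.array(image))              # compute the image embedding once
input_box = np.array([100, 100, 400, 400])        # illustrative XYXY pixel box
masks, scores, logits = predictor.predict(
    point_coords=None,   # no point prompts in box mode
    point_labels=None,
    box=input_box[None, :],  # shape (1, 4): a batch of one box
)
best_mask = masks[scores.argmax()]  # keep the highest-scoring mask, as the app does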
@@ -365,7 +338,7 @@ with gr.Blocks(css=css, title="TinySAM") as demo:
     gr.Examples(
         examples=examples,
         inputs=[cond_img_p],
-        examples_per_page=
+        examples_per_page=6,
     )
 
     with gr.Column():
@@ -396,7 +369,7 @@ with gr.Blocks(css=css, title="TinySAM") as demo:
         examples=examples,
         inputs=[cond_img_b],
 
-        examples_per_page=
+        examples_per_page=6,
     )
 
     with gr.Column():
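Both UI hunks make the same fix: the dangling examples_per_page= argument is completed so each example gallery shows six thumbnails per page. A hedged sketch of the pattern; the component name and example path are placeholders, not the Space's real ones:

import gradio as gr

with gr.Blocks(title="TinySAM") as demo:
    cond_img_b = gr.Image(label="Input with box", type="pil")  # placeholder component
    gr.Examples(
        examples=[["assets/picture1.jpg"]],  # placeholder example list
        inputs=[cond_img_b],
        examples_per_page=6,  # one page of six thumbnails
    )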