Upload app.py
app.py ADDED
@@ -0,0 +1,455 @@
# -*- coding: utf-8 -*-

import sys
import io
import requests
import json
import base64
from PIL import Image
import numpy as np
import gradio as gr
import mmengine
from mmengine import Config, get

import argparse
import os
import cv2
import yaml
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import datasets
import models

from torchvision import transforms
from mmcv.runner import load_checkpoint
from metrics import StreamSegMetrics
import visual_utils
from models.utils_prompt import get_prompt_inp, pre_prompt, pre_scatter_prompt, get_prompt_inp_scatter

device = torch.device("cpu")

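# Note: several of the imports above (DataLoader, tqdm, datasets, load_checkpoint,
# StreamSegMetrics, mmengine) are not referenced anywhere in this demo script;
# they look like carry-overs from the project's training/evaluation code.
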
def batched_predict(model, inp, coord, bsize):
    with torch.no_grad():
        model.gen_feat(inp)
        n = coord.shape[1]
        ql = 0
        preds = []
        while ql < n:
            qr = min(ql + bsize, n)
            pred = model.query_rgb(coord[:, ql: qr, :])
            preds.append(pred)
            ql = qr
        pred = torch.cat(preds, dim=1)
    return pred, preds


def tensor2PIL(tensor):
    toPIL = transforms.ToPILImage()
    return toPIL(tensor)

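# batched_predict() and tensor2PIL() above are generic helpers that the Gradio
# handlers below do not call; batched_predict() evaluates model.query_rgb() over
# the query coordinates in chunks of `bsize` points to keep peak memory bounded.
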
def Decoder1_optical_instance(image_input):
    with open('configs/fine_tuning_one_decoder.yaml', 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    model = models.make(config['model']).cpu()
    sam_checkpoint = torch.load("./save/model_epoch_last.pth", map_location='cpu')
    model.load_state_dict(sam_checkpoint, strict=False)
    model.eval()

    # img = np.array(image_input).copy()
    label2color = visual_utils.Label2Color(cmap=visual_utils.color_map('Unify_double'))
    # image_input.save(f'./save/visual_fair1m/input_img.png', quality=5)
    img = transforms.Resize([1024, 1024])(image_input)
    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    input_img = transform(img)
    input_img = transforms.ToTensor()(img).unsqueeze(0)  # note: this overwrites the normalized tensor above; the encoder receives the un-normalized image
    image_embedding = model.image_encoder(input_img)  # torch.Size([1, 256, 64, 64])
    sparse_embeddings, dense_embeddings, scatter_embeddings = model.prompt_encoder(
        points=None,
        boxes=None,
        masks=None,
        scatter=None)
    # instance-category (object) decoder
    low_res_masks, iou_predictions = model.mask_decoder(
        image_embeddings=image_embedding,
        image_pe=model.prompt_encoder.get_dense_pe(),
        sparse_prompt_embeddings=sparse_embeddings,
        dense_prompt_embeddings=dense_embeddings,
        multimask_output=False
    )
    pred = model.postprocess_masks(low_res_masks, model.inp_size, model.inp_size)
    _, prediction = pred.max(dim=1)
    prediction_to_save = label2color(prediction.cpu().numpy().astype(np.uint8))[0]

    return prediction_to_save

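# If ImageNet normalization were intended in Decoder1_optical_instance() as well
# (the terrain and SAR paths below do normalize), a minimal sketch of that variant
# would be the following; this is an assumption about intent, not the uploaded
# script's behaviour:
#
#     input_img = transform(img).unsqueeze(0)
#     image_embedding = model.image_encoder(input_img)
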
def Decoder1_optical_terrain(image_input):
    with open('configs/fine_tuning_one_decoder.yaml', 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    model = models.make(config['model']).cpu()
    sam_checkpoint = torch.load("./save/model_epoch_last.pth", map_location='cpu')
    model.load_state_dict(sam_checkpoint, strict=False)
    model.eval()

    denorm = visual_utils.Denormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    label2color = visual_utils.Label2Color(cmap=visual_utils.color_map('Unify_Vai'))
    # image_input.save(f'./save/visual_fair1m/input_img.png', quality=5)
    img = transforms.Resize([1024, 1024])(image_input)
    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    input_img = transform(img)
    input_img = torch.unsqueeze(input_img, dim=0)
    # input_img = transforms.ToTensor()(img).unsqueeze(0)
    image_embedding = model.image_encoder(input_img)  # torch.Size([1, 256, 64, 64])
    sparse_embeddings, dense_embeddings, scatter_embeddings = model.prompt_encoder(
        points=None,
        boxes=None,
        masks=None,
        scatter=None)
    low_res_masks_instance, iou_predictions = model.mask_decoder(
        image_embeddings=image_embedding,
        # image_embeddings=image_embedding.unsqueeze(0),
        image_pe=model.prompt_encoder.get_dense_pe(),
        sparse_prompt_embeddings=sparse_embeddings,
        dense_prompt_embeddings=dense_embeddings,
        # multimask_output=multimask_output,
        multimask_output=False
    )
    # terrain-category decoder
    low_res_masks, iou_predictions_2 = model.mask_decoder_diwu(
        image_embeddings=image_embedding,
        image_pe=model.prompt_encoder.get_dense_pe(),
        sparse_prompt_embeddings=sparse_embeddings,
        dense_prompt_embeddings=dense_embeddings,
        # multimask_output=False,
        multimask_output=True,
    )  # B x (C+1) x H x W

    pred_instance = model.postprocess_masks(low_res_masks_instance, model.inp_size, model.inp_size)
    pred = model.postprocess_masks(low_res_masks, model.inp_size, model.inp_size)
    pred = torch.softmax(pred, dim=1)
    pred_instance = torch.softmax(pred_instance, dim=1)
    _, prediction = pred.max(dim=1)
    prediction[prediction == 12] = 0  # map the second decoder's background class (12) back to 0
    print(torch.unique(prediction))
    _, prediction_instance = pred_instance.max(dim=1)
    print(torch.unique(prediction_instance))
    prediction_sum = prediction + prediction_instance  # pixels without a conflict keep their normal prediction
    print(torch.unique(prediction_sum))
    prediction_tmp = prediction_sum.clone()
    prediction_tmp[prediction_tmp == 1] = 255
    prediction_tmp[prediction_tmp == 2] = 255
    prediction_tmp[prediction_tmp == 5] = 255
    prediction_tmp[prediction_tmp == 6] = 255
    prediction_tmp[prediction_tmp == 14] = 255
    # prediction_tmp[prediction_tmp==0] = 255  # background in both decoders
    # index = prediction_tmp != 255
    pred[:, 0][prediction_tmp == 255] = 100  # at already-decided pixels, force the background probability to the maximum
    pred_instance[:, 0][prediction_tmp == 255] = 100  # at already-decided pixels, force the background probability to the maximum
    buchong = torch.zeros([1, 2, 1024, 1024])
    pred = torch.cat((pred, buchong), dim=1)
    # print(torch.unique(torch.argmax(pred,dim=1)))
    # Decoder1_logits = torch.zeros([1,15,1024,1024]).cuda()
    Decoder2_logits = torch.zeros([1, 15, 1024, 1024])
    Decoder2_logits[:, 0, ...] = pred[:, 0, ...]
    Decoder2_logits[:, 5, ...] = pred_instance[:, 5, ...]
    Decoder2_logits[:, 14, ...] = pred_instance[:, 14, ...]
    Decoder2_logits[:, 1, ...] = pred[:, 1, ...]
    Decoder2_logits[:, 2, ...] = pred[:, 2, ...]
    Decoder2_logits[:, 6, ...] = pred[:, 6, ...]
    # Decoder_logits = Decoder1_logits+Decoder2_logits
    pred_chongtu = torch.argmax(Decoder2_logits, dim=1)
    # pred_pred = torch.argmax(Decoder1_logits, dim=1)
    pred_predinstance = torch.argmax(Decoder2_logits, dim=1)
    print(torch.unique(pred_chongtu))
    pred_chongtu[prediction_tmp == 255] = 0
    prediction_sum[prediction_tmp != 255] = 0
    prediction_final = (pred_chongtu + prediction_sum).cpu().numpy()
    prediction_to_save = label2color(prediction_final)[0]

    return prediction_to_save

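# Summary of the merge heuristic in Decoder1_optical_terrain() above, as implemented:
# 1. Run both decoders and take a per-pixel argmax from each (terrain background
#    remapped to 0).
# 2. Add the two label maps; pixels whose summed label is 1, 2, 5, 6 or 14 are
#    treated as unambiguous and flagged with 255.
# 3. For the remaining (conflicting) pixels, stack selected class channels from both
#    decoders into Decoder2_logits and re-take the argmax.
# 4. Combine the unambiguous and re-decided pixels into the final label map.
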
def Multi_box_prompts(input_prompt):
    with open('configs/fine_tuning_one_decoder.yaml', 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    model = models.make(config['model']).cpu()
    sam_checkpoint = torch.load("./save/model_epoch_last.pth", map_location='cpu')
    model.load_state_dict(sam_checkpoint, strict=False)
    model.eval()

    label2color = visual_utils.Label2Color(cmap=visual_utils.color_map('Unify_double'))
    # image_input.save(f'./save/visual_fair1m/input_img.png', quality=5)
    img = transforms.Resize([1024, 1024])(input_prompt["image"])
    input_img = transforms.ToTensor()(img).unsqueeze(0)
    image_embedding = model.image_encoder(input_img)  # torch.Size([1, 256, 64, 64])
    sparse_embeddings, dense_embeddings, scatter_embeddings = model.prompt_encoder(
        points=None,
        boxes=None,
        masks=None,
        scatter=None)
    # instance-category (object) decoder
    low_res_masks, iou_predictions = model.mask_decoder(
        image_embeddings=image_embedding,
        image_pe=model.prompt_encoder.get_dense_pe(),
        sparse_prompt_embeddings=sparse_embeddings,
        dense_prompt_embeddings=dense_embeddings,
        multimask_output=False
    )
    pred = model.postprocess_masks(low_res_masks, model.inp_size, model.inp_size)
    _, prediction = pred.max(dim=1)
    prediction_to_save = label2color(prediction.cpu().numpy().astype(np.uint8))[0]

    def find_instance(image_map):
        BACKGROUND = 0
        steps = [[1, 0], [0, 1], [-1, 0], [0, -1], [1, 1], [1, -1], [-1, 1], [-1, -1]]
        instances = []

        def bfs(x, y, category_id):
            nonlocal image_map, steps
            instance = {(x, y)}
            q = [(x, y)]
            image_map[x, y] = BACKGROUND
            while len(q) > 0:
                x, y = q.pop(0)
                # print(x, y, image_map[x][y])
                for step in steps:
                    xx = step[0] + x
                    yy = step[1] + y
                    if 0 <= xx < len(image_map) and 0 <= yy < len(image_map[0]) \
                            and image_map[xx][yy] == category_id:  # and (xx, yy) not in q:
                        q.append((xx, yy))
                        instance.add((xx, yy))
                        image_map[xx, yy] = BACKGROUND
            return instance

        image_map = image_map[:]
        for i in range(len(image_map)):
            for j in range(len(image_map[i])):
                category_id = image_map[i][j]
                if category_id == BACKGROUND:
                    continue
                instances.append(bfs(i, j, category_id))
        return instances

    prompts = find_instance(np.uint8(np.array(input_prompt["mask"]).sum(-1) != 0))
    img_mask = np.array(img).copy()

    def get_box(prompt):
        xs = []
        ys = []
        for x, y in prompt:
            xs.append(x)
            ys.append(y)
        return [[min(xs), min(ys)], [max(xs), max(ys)]]

    def in_box(point, box):
        left_up, right_down = box
        x, y = point
        return x >= left_up[0] and x <= right_down[0] and y >= left_up[1] and y <= right_down[1]

    def draw_box(box_outer, img, radius=4):
        radius -= 1
        left_up_outer, right_down_outer = box_outer
        box_inner = [list(np.array(left_up_outer) + radius),
                     list(np.array(right_down_outer) - radius)]
        for x in range(len(img)):
            for y in range(len(img[x])):
                if in_box([x, y], box_outer):
                    img_mask[x, y] = (1, 1, 1)
                if in_box([x, y], box_outer) and (not in_box([x, y], box_inner)):
                    img[x, y] = (255, 0, 0)
        return img

    output = prediction_to_save  # fallback when no box has been drawn on the mask
    for prompt in prompts:
        box = get_box(prompt)
        output = draw_box(box, prediction_to_save) * (img_mask == 1)

    return output

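# In Multi_box_prompts() above, find_instance() groups the user-painted mask pixels
# into 8-connected components, get_box() reduces each component to its axis-aligned
# bounding box ([[min_row, min_col], [max_row, max_col]]), and draw_box() overlays a
# red box outline on the colorized prediction while img_mask records which pixels
# fall inside any box; the returned image is masked to those pixels.
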
def Decoder2_SAR(SAR_image, SAR_prompt):
    with open('configs/multi_mo_multi_task_sar_prompt.yaml', 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    model = models.make(config['model']).cpu()
    sam_checkpoint = torch.load("./save/SAR/model_epoch_last.pth", map_location='cpu')
    model.load_state_dict(sam_checkpoint, strict=True)
    model.eval()

    denorm = visual_utils.Denormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    label2color = visual_utils.Label2Color(cmap=visual_utils.color_map('Unify_YIJISAR'))

    img = transforms.Resize([1024, 1024])(SAR_image)
    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    input_img = transform(img)
    input_img = torch.unsqueeze(input_img, dim=0)
    # input_img = transforms.ToTensor()(img).unsqueeze(0)
    # input_img = transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229,0.224,0.225])
    flip_flag = torch.Tensor([False])
    image_embedding = model.image_encoder(input_img)

    # scattter_prompt = cv2.imread(scatter_file_, cv2.IMREAD_UNCHANGED)
    # scattter_prompt = get_prompt_inp_scatter(name[0].replace('gt', 'JIHUAFENJIE'))
    SAR_prompt = cv2.imread(SAR_prompt, cv2.IMREAD_UNCHANGED)
    scatter_torch = pre_scatter_prompt(SAR_prompt, flip_flag, device=input_img.device)
    scatter_torch = scatter_torch.unsqueeze(0)
    scatter_torch = torch.nn.functional.interpolate(scatter_torch, size=(256, 256))
    sparse_embeddings, dense_embeddings, scatter_embeddings = model.prompt_encoder(
        points=None,
        boxes=None,
        masks=None,
        scatter=scatter_torch)
    # terrain-category decoder
    low_res_masks, iou_predictions_2 = model.mask_decoder_diwu(
        image_embeddings=image_embedding,
        image_pe=model.prompt_encoder.get_dense_pe(),
        sparse_prompt_embeddings=sparse_embeddings,
        dense_prompt_embeddings=dense_embeddings,
        # multimask_output=False,
        multimask_output=True,
    )  # B x (C+1) x H x W
    pred = model.postprocess_masks(low_res_masks, model.inp_size, model.inp_size)
    _, prediction = pred.max(dim=1)
    prediction = prediction.cpu().numpy()
    prediction_to_save = label2color(prediction)[0]

    return prediction_to_save

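# In Decoder2_SAR() above, the polarization scatter prompt arrives as an image file:
# it is read with cv2.IMREAD_UNCHANGED, converted to a tensor by pre_scatter_prompt(),
# resized to 256x256, and passed to the prompt encoder through its `scatter` argument,
# which produces scatter_embeddings alongside the usual sparse/dense embeddings.
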
examples1_instance = [
    ['./images/optical/isaid/_P0007_1065_319_image.png'],
    ['./images/optical/isaid/_P0466_1068_420_image.png'],
    ['./images/optical/isaid/_P0897_146_34_image.png'],
    ['./images/optical/isaid/_P1397_844_904_image.png'],
    ['./images/optical/isaid/_P2645_883_965_image.png'],
    ['./images/optical/isaid/_P1398_1290_630_image.png']
]

examples1_terrain = [
    ['./images/optical/vaihingen/top_mosaic_09cm_area2_105_image.png'],
    ['./images/optical/vaihingen/top_mosaic_09cm_area4_227_image.png'],
    ['./images/optical/vaihingen/top_mosaic_09cm_area20_142_image.png'],
    ['./images/optical/vaihingen/top_mosaic_09cm_area24_128_image.png'],
    ['./images/optical/vaihingen/top_mosaic_09cm_area27_34_image.png']
]


examples1_multi_box = [
    ['./images/optical/isaid/_P0007_1065_319_image.png'],
    ['./images/optical/isaid/_P0466_1068_420_image.png'],
    ['./images/optical/isaid/_P0897_146_34_image.png'],
    ['./images/optical/isaid/_P1397_844_904_image.png'],
    ['./images/optical/isaid/_P2645_883_965_image.png'],
    ['./images/optical/isaid/_P1398_1290_630_image.png']
]


examples2 = [
    ['./images/sar/YIJISARGF3_MYN_QPSI_001269_E113.2_N23.0_20161105_L1A_L10002009158_ampl_4_image.png', './images/sar/YIJISARGF3_MYN_QPSI_001269_E113.2_N23.0_20161105_L1A_L10002009158_ampl_4.png'],
    ['./images/sar/YIJISARGF3_MYN_QPSI_001269_E113.2_N23.0_20161105_L1A_L10002009158_ampl_15_image.png', './images/sar/YIJISARGF3_MYN_QPSI_001269_E113.2_N23.0_20161105_L1A_L10002009158_ampl_15.png'],
    ['./images/sar/YIJISARGF3_MYN_QPSI_001269_E113.2_N23.0_20161105_L1A_L10002009158_ampl_24_image.png', './images/sar/YIJISARGF3_MYN_QPSI_001269_E113.2_N23.0_20161105_L1A_L10002009158_ampl_24.png'],
    ['./images/sar/YIJISARGF3_MYN_QPSI_001269_E113.2_N23.0_20161105_L1A_L10002009158_ampl_41_image.png', './images/sar/YIJISARGF3_MYN_QPSI_001269_E113.2_N23.0_20161105_L1A_L10002009158_ampl_41.png'],
    ['./images/sar/YIJISARGF3_MYN_QPSI_999996_E121.2_N30.3_20160815_L1A_L10002015572_ampl_150_image.png', './images/sar/YIJISARGF3_MYN_QPSI_999996_E121.2_N30.3_20160815_L1A_L10002015572_ampl_150.png']
]


# RingMo-SAM designs two new promptable forms based on the characteristics of multimodal remote sensing images:
# the multi-box prompt and the SAR polarization scatter prompt.


title = "RingMo-SAM: A Foundation Model for Segment Anything in Multimodal Remote Sensing Images<br> \
        <div align='center'> \
        <h2><a href='https://arxiv.org/abs/2304.03284' target='_blank' rel='noopener'>[paper]</a> \
        <a href='https://github.com/AICyberTeam' target='_blank' rel='noopener'>[code]</a></h2> \
        <br> \
        <image src='file/fig1.png' width='720px' /> \
        <h2>RingMo-SAM can not only segment anything in optical and SAR remote sensing data, but also identify object categories.</h2> \
        </div> \
        "

# with gr.Blocks() as demo:
#     image_input = gr.Image(type='pil', label='Input Img')
#     image_output = gr.Image(label='Segment Result', type='numpy')


Decoder_optical_instance_io = gr.Interface(fn=Decoder1_optical_instance,
                                           inputs=[gr.Image(type='pil', label='optical_instance_img (optical image)')],
                                           outputs=[gr.Image(label='segment_result', type='numpy')],
                                           # title=title,
                                           description="<p> \
                                           Instance_Decoder:<br>\
                                           Instance-type objects (such as vehicles, aircraft, and ships) occupy a smaller proportion of the image. <br>\
                                           Our decoder decouples SAM's mask decoder into an instance-category decoder and a terrain-category decoder, so that the model fits both types of data adequately. <br>\
                                           Choose an example below, or upload optical instance images to be tested. <br>\
                                           The examples below were not seen during training; they are randomly selected for in-the-wild testing. <br>\
                                           </p>",
                                           allow_flagging='auto',
                                           examples=examples1_instance,
                                           cache_examples=False,
                                           )


Decoder_optical_terrain_io = gr.Interface(fn=Decoder1_optical_terrain,
                                          inputs=[gr.Image(type='pil', label='optical_terrain_img (optical image)')],
                                          # inputs=[gr.Image(type='pil', label='optical_img (optical image)'), gr.Image(type='pil', label='SAR_img (SAR image)'), gr.Image(type='pil', label='SAR_prompt (polarization scatter prompt)')],
                                          outputs=[gr.Image(label='segment_result', type='numpy')],
                                          # title=title,
                                          description="<p> \
                                          Terrain_Decoder:<br>\
                                          Terrain-type objects (such as vegetation, land, and rivers) occupy a larger proportion of the image. <br>\
                                          Our decoder decouples SAM's mask decoder into an instance-category decoder and a terrain-category decoder, so that the model fits both types of data adequately. <br>\
                                          Choose an example below, or upload optical terrain images to be tested. <br>\
                                          The examples below were not seen during training; they are randomly selected for in-the-wild testing. <br>\
                                          </p>",
                                          allow_flagging='auto',
                                          examples=examples1_terrain,
                                          cache_examples=False,
                                          )


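# The two tabs above map directly onto the decoupled decoders in the handler code:
# Decoder1_optical_instance() calls model.mask_decoder (instance categories), while
# Decoder1_optical_terrain() additionally calls model.mask_decoder_diwu (terrain
# categories) and merges the two predictions.
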
Decoder_multi_box_prompts_io = gr.Interface(fn=Multi_box_prompts,
                                            inputs=[gr.ImageMask(brush_radius=4, type='pil', label='input_img (image)')],
                                            outputs=[gr.Image(label='segment_result', type='numpy')],
                                            # title=title,
                                            description="<p> \
                                            Multi-box Prompts:<br>\
                                            Multiple boxes are sequentially encoded as sparse high-dimensional feature embeddings, \
                                            and the corresponding high-dimensional features are concatenated into a single feature vector that forms part of the sparse embedding. <br>\
                                            Choose an example below, or upload images to be tested, and then draw multiple boxes. <br>\
                                            The examples below were not seen during training; they are randomly selected for in-the-wild testing. <br>\
                                            </p>",
                                            allow_flagging='auto',
                                            examples=examples1_multi_box,
                                            cache_examples=False,
                                            )


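# Note: with gr.ImageMask (the Gradio 3.x sketch-tool image input, type='pil') the
# handler receives a dict with "image" and "mask" entries, which is why
# Multi_box_prompts() indexes input_prompt["image"] and input_prompt["mask"];
# brush strokes on the canvas become the mask from which box prompts are derived.
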
Decoder_SAR_io = gr.Interface(fn=Decoder2_SAR,
                              inputs=[gr.Image(type='pil', label='SAR_img (SAR image)'), gr.Image(type='filepath', label='SAR_prompt (polarization scatter prompt)')],
                              outputs=[gr.Image(label='segment_result', type='numpy')],
                              description="<p> \
                              SAR Polarization Scatter Prompts:<br>\
                              Different terrain categories usually exhibit different scattering properties. \
                              We therefore use an encoding network to map these SAR polarization scatter prompts onto the corresponding SAR images, \
                              which improves the segmentation results on SAR images. <br>\
                              Choose an example below, or upload SAR images and the corresponding polarization scatter prompts to be tested. <br>\
                              The examples below were not seen during training; they are randomly selected for in-the-wild testing. <br>\
                              </p>",
                              allow_flagging='auto',
                              examples=examples2,
                              cache_examples=False,
                              )


# Decoder1_io.launch(server_name="0.0.0.0", server_port=34311)
# Decoder1_io.launch(enable_queue=False)
# demo = gr.TabbedInterface([Decoder1_io, Decoder2_io], ['Instance_Decoder', 'Terrain_Decoder'], title=title)
demo = gr.TabbedInterface([Decoder_optical_instance_io, Decoder_optical_terrain_io, Decoder_multi_box_prompts_io, Decoder_SAR_io],
                          ['optical_instance_img (optical image)', 'optical_terrain_img (optical image)', 'multi_box_prompts (multi-box prompt)', 'SAR_img (polarization scatter prompt)'],
                          title=title).launch()
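
# A minimal sketch of running this demo outside Hugging Face Spaces on an explicit
# host/port (values are illustrative, echoing the commented-out launch above):
#
#     demo = gr.TabbedInterface([...], [...], title=title)
#     demo.launch(server_name="0.0.0.0", server_port=34311)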