import os

# detectron2 is not pre-installed in this Space; install it on first run.
try:
    import detectron2
except ImportError:
    os.system('pip install git+https://github.com/facebookresearch/detectron2.git')

# build the multi-scale deformable attention ops used by the pixel decoder
os.system('cd /home/user/app/GLEE/glee/models/pixel_decoder/ops && python setup.py build install --user')
# os.system("pip uninstall -y gradio")
# os.system("pip install gradio==4.11.0")
# os.system('python -m pip install -e detectron2')
import gradio as gr
import numpy as np
import cv2
import torch
from os import path
from detectron2.config import get_cfg
from GLEE.glee.models.glee_model import GLEE_Model
from GLEE.glee.config_deeplab import add_deeplab_config
from GLEE.glee.config import add_glee_config
import torch.nn.functional as F
import torchvision
import math
from scipy.optimize import linear_sum_assignment
from obj365_name import categories as OBJ365_CATEGORIESV2
import copy
import skvideo.io
this_dir = path.dirname(path.abspath(__file__))
print(f"Is CUDA available: {torch.cuda.is_available()}")
# True
if torch.cuda.is_available():
print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
# Tesla T4
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
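# Convert a drawn RGBA canvas layer into a visual prompt: the tight bounding box of the
# drawn pixels plus the stroke color (reused later to colorize the predicted mask).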
def scribble2box(img):
    # img is one RGBA canvas layer; an all-zero layer means nothing was drawn on it
    if img.max() == 0:
        return None, None
    rows = np.any(img, axis=1)
    cols = np.any(img, axis=0)
    drawn = np.any(img, axis=2)
    R, G, B, A = img[np.where(drawn)[0][0], np.where(drawn)[1][0]].tolist()  # color of the first drawn pixel
    ymin, ymax = np.where(rows)[0][[0, -1]]
    xmin, xmax = np.where(cols)[0][[0, -1]]
    return np.array([xmin, ymin, xmax, ymax]), (R, G, B)
def LSJ_box_postprocess(out_bbox, padding_size, crop_size, img_h, img_w):
    # rescale normalized cxcywh boxes from the padded inference canvas back to
    # xyxy coordinates in the original image resolution
    boxes = box_cxcywh_to_xyxy(out_bbox)
    lsj_scale = torch.tensor([padding_size[1], padding_size[0], padding_size[1], padding_size[0]]).to(out_bbox)
    crop_scale = torch.tensor([crop_size[1], crop_size[0], crop_size[1], crop_size[0]]).to(out_bbox)
    boxes = boxes * lsj_scale
    boxes = boxes / crop_scale
    boxes = torch.clamp(boxes, 0, 1)
    scale_fct = torch.tensor([img_w, img_h, img_w, img_h]).to(out_bbox)
    boxes = boxes * scale_fct
    return boxes
COLORS = [[0.000, 0.447, 0.741], [0.850, 0.325, 0.098], [0.929, 0.694, 0.125],
[0.494, 0.184, 0.556], [0.466, 0.674, 0.188], [0.301, 0.745, 0.933],
[0.494, 0.000, 0.556], [0.494, 0.000, 0.000], [0.000, 0.745, 0.000],
[0.700, 0.300, 0.600],[0.000, 0.447, 0.741], [0.850, 0.325, 0.098]]
coco_class_name = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
YTBVISOVIS_class_name = ['lizard', 'cat', 'horse', 'eagle', 'frog', 'Horse', 'monkey', 'bear', 'parrot', 'giant_panda', 'truck', 'zebra', 'rabbit', 'skateboard', 'tiger', 'shark', 'Person', 'Poultry', 'Zebra', 'Airplane', 'elephant', 'Elephant', 'Turtle', 'snake', 'train', 'Dog', 'snowboard', 'airplane', 'Lizard', 'dog', 'Cat', 'earless_seal', 'boat', 'Tiger', 'motorbike', 'duck', 'fox', 'Monkey', 'Bird', 'Bear', 'tennis_racket', 'Rabbit', 'Giraffe', 'Motorcycle', 'fish', 'Boat', 'deer', 'ape', 'Bicycle', 'Parrot', 'Cow', 'turtle', 'mouse', 'owl', 'Fish', 'surfboard', 'Giant_panda', 'Sheep', 'hand', 'Vehical', 'sedan', 'leopard', 'person', 'giraffe', 'cow']
OBJ365_class_names = [cat['name'] for cat in OBJ365_CATEGORIESV2]
class_agnostic_name = ['object']
if torch.cuda.is_available():
print('use cuda')
device = 'cuda'
else:
print('use cpu')
device='cpu'
cfg_r50 = get_cfg()
add_deeplab_config(cfg_r50)
add_glee_config(cfg_r50)
conf_files_r50 = 'GLEE/configs/R50.yaml'
checkpoints_r50 = torch.load('GLEE_R50_Scaleup10m.pth', map_location='cpu')  # load on CPU so the demo also works without CUDA
cfg_r50.merge_from_file(conf_files_r50)
GLEEmodel_r50 = GLEE_Model(cfg_r50, None, device, None, True).to(device)
GLEEmodel_r50.load_state_dict(checkpoints_r50, strict=False)
GLEEmodel_r50.eval()
cfg_vos = get_cfg()
add_deeplab_config(cfg_vos)
add_glee_config(cfg_vos)
conf_files_vos = 'GLEE/configs/vos_v0.yaml'
cfg_vos.merge_from_file(conf_files_vos)
cfg_swin = get_cfg()
add_deeplab_config(cfg_swin)
add_glee_config(cfg_swin)
conf_files_swin = 'GLEE/configs/SwinL.yaml'
checkpoints_swin = torch.load('GLEE_SwinL_Scaleup10m.pth', map_location='cpu')
cfg_swin.merge_from_file(conf_files_swin)
GLEEmodel_swin = GLEE_Model(cfg_swin, None, device, None, True).to(device)
GLEEmodel_swin.load_state_dict(checkpoints_swin, strict=False)
GLEEmodel_swin.eval()
cfg_eva02 = get_cfg()
add_deeplab_config(cfg_eva02)
add_glee_config(cfg_eva02)
conf_files_eva02 = 'GLEE/configs/EVA02.yaml'
checkpoints_eva = torch.load('GLEE_EVA02_Scaleup10m.pth', map_location='cpu')
cfg_eva02.merge_from_file(conf_files_eva02)
GLEEmodel_eva02 = GLEE_Model(cfg_eva02, None, device, None, True).to(device)
GLEEmodel_eva02.load_state_dict(checkpoints_eva, strict=False)
GLEEmodel_eva02.eval()
# inference_type = 'LSJ'
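# Preprocessing constants: inputs are normalized with ImageNet mean/std, resized so the
# shorter side is `inference_size` (or `video_inference_size` for videos), and padded to a
# multiple of `size_divisibility`; the 'LSJ' path instead pads onto a fixed 1536x1536 canvas.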
pixel_mean = torch.Tensor( [123.675, 116.28, 103.53]).to(device).view(3, 1, 1)
pixel_std = torch.Tensor([58.395, 57.12, 57.375]).to(device).view(3, 1, 1)
normalizer = lambda x: (x - pixel_mean) / pixel_std
inference_size = 800
video_inference_size = 640
inference_type = 'resize_shot' # or LSJ
size_divisibility = 32
FONT_SCALE = 1.5e-3
THICKNESS_SCALE = 1e-3
TEXT_Y_OFFSET_SCALE = 1e-2
if inference_type != 'LSJ':
resizer = torchvision.transforms.Resize(inference_size,antialias=True)
videoresizer = torchvision.transforms.Resize(video_inference_size,antialias=True)
else:
resizer = torchvision.transforms.Resize(size = 1535, max_size=1536, antialias=True)
videoresizer = torchvision.transforms.Resize(size = 1535, max_size=1536, antialias=True)
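# Single-image inference: runs GLEE with either a text prompt (category list / referring
# expression) or a visual prompt (point / box / scribble) and draws the results on the image.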
def segment_image(img, prompt_mode, categoryname, custom_category, expressiong, results_select, num_inst_select, threshold_select, mask_image_mix_ration, model_selection):
torch.cuda.empty_cache()
if model_selection == 'GLEE-Plus (SwinL)':
GLEEmodel = GLEEmodel_swin
inference_type = 'resize_shot'
print('use GLEE-Plus')
elif model_selection == 'GLEE-Lite (R50)':
inference_type = 'resize_shot'
GLEEmodel = GLEEmodel_r50
print('use GLEE-Lite')
else:
GLEEmodel = GLEEmodel_eva02
print('use GLEE-Pro')
inference_type = 'LSJ'
copyed_img = img['background'][:,:,:3].copy()
ori_image = torch.as_tensor(np.ascontiguousarray( copyed_img.transpose(2, 0, 1)))
ori_image = normalizer(ori_image.to(device))[None,]
_,_, ori_height, ori_width = ori_image.shape
if inference_type == 'LSJ':
resize_image = resizer(ori_image)
image_size = torch.as_tensor((resize_image.shape[-2],resize_image.shape[-1]))
re_size = resize_image.shape[-2:]
infer_image = torch.zeros(1,3,1536,1536).to(ori_image)
infer_image[:,:,:image_size[0],:image_size[1]] = resize_image
padding_size = (1536,1536)
else:
resize_image = resizer(ori_image)
image_size = torch.as_tensor((resize_image.shape[-2],resize_image.shape[-1]))
re_size = resize_image.shape[-2:]
if size_divisibility > 1:
stride = size_divisibility
# the last two dims are H,W, both subject to divisibility requirement
padding_size = ((image_size + (stride - 1)).div(stride, rounding_mode="floor") * stride).tolist()
infer_image = torch.zeros(1,3,padding_size[0],padding_size[1]).to(resize_image)
infer_image[0,:,:image_size[0],:image_size[1]] = resize_image
# reversed_image = infer_image*pixel_std + pixel_mean
# reversed_image = torch.clip(reversed_image,min=0,max=255)
# reversed_image = reversed_image[0].permute(1,2,0)
# reversed_image = reversed_image.int().cpu().numpy().copy()
# cv2.imwrite('test.png',reversed_image[:,:,::-1])
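    # Text-prompt branch: open-vocabulary detection with a category list, or
    # referring-expression grounding with a free-form description.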
if prompt_mode == 'categories' or prompt_mode == 'expression':
if len(results_select)==0:
results_select=['box']
if prompt_mode == 'categories':
if categoryname =="COCO-80":
batch_category_name = coco_class_name
elif categoryname =="OBJ365":
batch_category_name = OBJ365_class_names
elif categoryname =="Custom-List":
batch_category_name = custom_category.split(',')
else:
batch_category_name = class_agnostic_name
# mask_ori = torch.from_numpy(np.load('03_moto_mask.npy'))[None,]
# mask_ori = (F.interpolate(mask_ori, (height, width), mode='bilinear') > 0).to(device)
# prompt_list = [mask_ori[0]]
prompt_list = []
with torch.no_grad():
(outputs,_) = GLEEmodel(infer_image, prompt_list, task="coco", batch_name_list=batch_category_name, is_train=False)
topK_instance = max(num_inst_select,1)
else:
topK_instance = 1
prompt_list = {'grounding':[expressiong]}
with torch.no_grad():
(outputs,_) = GLEEmodel(infer_image, prompt_list, task="grounding", batch_name_list=[], is_train=False)
mask_pred = outputs['pred_masks'][0]
mask_cls = outputs['pred_logits'][0]
boxes_pred = outputs['pred_boxes'][0]
scores = mask_cls.sigmoid().max(-1)[0]
scores_per_image, topk_indices = scores.topk(topK_instance, sorted=True)
if prompt_mode == 'categories':
valid = scores_per_image>threshold_select
topk_indices = topk_indices[valid]
scores_per_image = scores_per_image[valid]
pred_class = mask_cls[topk_indices].max(-1)[1].tolist()
pred_boxes = boxes_pred[topk_indices]
boxes = LSJ_box_postprocess(pred_boxes,padding_size,re_size, ori_height,ori_width)
mask_pred = mask_pred[topk_indices]
pred_masks = F.interpolate( mask_pred[None,], size=(padding_size[0], padding_size[1]), mode="bilinear", align_corners=False )
pred_masks = pred_masks[:,:,:re_size[0],:re_size[1]]
pred_masks = F.interpolate( pred_masks, size=(ori_height,ori_width), mode="bilinear", align_corners=False )
pred_masks = (pred_masks>0).detach().cpu().numpy()[0]
if 'mask' in results_select:
zero_mask = np.zeros_like(copyed_img)
for nn, mask in enumerate(pred_masks):
# mask = mask.numpy()
mask = mask.reshape(mask.shape[0], mask.shape[1], 1)
lar = np.concatenate((mask*COLORS[nn%12][2], mask*COLORS[nn%12][1], mask*COLORS[nn%12][0]), axis = 2)
zero_mask = zero_mask+ lar
lar_valid = zero_mask>0
masked_image = lar_valid*copyed_img
img_n = masked_image*mask_image_mix_ration + np.clip(zero_mask,0,1)*255*(1-mask_image_mix_ration)
max_p = img_n.max()
img_n = 255*img_n/max_p
ret = (~lar_valid*copyed_img)*mask_image_mix_ration + img_n
ret = ret.astype('uint8')
else:
ret = copyed_img
if 'box' in results_select:
line_width = max(ret.shape) /200
for nn,(classid, box) in enumerate(zip(pred_class,boxes)):
x1,y1,x2,y2 = box.long().tolist()
RGB = (COLORS[nn%12][2]*255,COLORS[nn%12][1]*255,COLORS[nn%12][0]*255)
cv2.rectangle(ret, (x1,y1), (x2,y2), RGB, math.ceil(line_width) )
if prompt_mode == 'categories' or (prompt_mode == 'expression' and 'expression' in results_select ):
if prompt_mode == 'categories':
label = ''
if 'name' in results_select:
label += batch_category_name[classid]
if 'score' in results_select:
label += str(scores_per_image[nn].item())[:4]
else:
label = expressiong
if len(label)==0:
continue
height, width, _ = ret.shape
FONT = cv2.FONT_HERSHEY_COMPLEX
label_width, label_height = cv2.getTextSize(label, FONT, min(width, height) * FONT_SCALE, math.ceil(min(width, height) * THICKNESS_SCALE))[0]
cv2.rectangle(ret, (x1,y1), (x1+label_width,(y1 -label_height) - int(height * TEXT_Y_OFFSET_SCALE)), RGB, -1)
cv2.putText(
ret,
label,
(x1, y1 - int(height * TEXT_Y_OFFSET_SCALE)),
fontFace=FONT,
fontScale=min(width, height) * FONT_SCALE,
thickness=math.ceil(min(width, height) * THICKNESS_SCALE),
color=(255,255,255),
)
ret = ret.astype('uint8')
return ret
else: #visual prompt
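        # Visual-prompt branch: each drawn layer is turned into a binary prompt mask
        # (box, point, or scribble) and GLEE segments one object per prompt.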
topK_instance = 1
copyed_img = img['background'][:,:,:3].copy()
# get bbox from scribbles in layers
bbox_list = [scribble2box(layer) for layer in img['layers'] ]
visual_prompt_list = []
visual_prompt_RGB_list = []
for mask, (box,RGB) in zip(img['layers'], bbox_list):
if box is None:
continue
if prompt_mode=='box':
fakemask = np.zeros_like(copyed_img[:,:,0])
x1 ,y1 ,x2, y2 = box
fakemask[ y1:y2, x1:x2 ] = 1
fakemask = fakemask>0
elif prompt_mode=='point':
fakemask = np.zeros_like(copyed_img[:,:,0])
H,W = fakemask.shape
x1 ,y1 ,x2, y2 = box
center_x, center_y = (x1+x2)//2, (y1+y2)//2
fakemask[ center_y-H//40:center_y+H//40, center_x-W//40:center_x+W//40 ] = 1
fakemask = fakemask>0
elif prompt_mode=='scribble':
fakemask = mask[:,:,-1]
fakemask = fakemask>0
fakemask = torch.from_numpy(fakemask).unsqueeze(0).to(ori_image)
if inference_type == 'LSJ':
resize_fakemask = resizer(fakemask)
infer_visual_prompt = torch.zeros(1,1536,1536).to(resize_fakemask)
infer_visual_prompt[:,:image_size[0],:image_size[1]] = resize_fakemask
else:
resize_fakemask = resizer(fakemask)
if size_divisibility > 1:
# the last two dims are H,W, both subject to divisibility requirement
infer_visual_prompt = torch.zeros(1,padding_size[0],padding_size[1]).to(resize_fakemask)
infer_visual_prompt[:,:image_size[0],:image_size[1]] = resize_fakemask
visual_prompt_list.append( infer_visual_prompt>0 )
visual_prompt_RGB_list.append(RGB)
mask_results_list = []
for visual_prompt in visual_prompt_list:
prompt_list = {'spatial':[visual_prompt]}
with torch.no_grad():
(outputs,_) = GLEEmodel(infer_image, prompt_list, task="coco", batch_name_list=['object'], is_train=False, visual_prompt_type=prompt_mode )
mask_pred = outputs['pred_masks'][0]
mask_cls = outputs['pred_logits'][0]
boxes_pred = outputs['pred_boxes'][0]
scores = mask_cls.sigmoid().max(-1)[0]
scores_per_image, topk_indices = scores.topk(topK_instance, sorted=True)
pred_class = mask_cls[topk_indices].max(-1)[1].tolist()
pred_boxes = boxes_pred[topk_indices]
boxes = LSJ_box_postprocess(pred_boxes,padding_size,re_size, ori_height,ori_width)
mask_pred = mask_pred[topk_indices]
pred_masks = F.interpolate( mask_pred[None,], size=(padding_size[0], padding_size[1]), mode="bilinear", align_corners=False )
pred_masks = pred_masks[:,:,:re_size[0],:re_size[1]]
pred_masks = F.interpolate( pred_masks, size=(ori_height,ori_width), mode="bilinear", align_corners=False )
pred_masks = (pred_masks>0).detach().cpu().numpy()[0]
mask_results_list.append(pred_masks)
zero_mask = np.zeros_like(copyed_img)
for mask,RGB in zip(mask_results_list,visual_prompt_RGB_list):
mask = mask.reshape(mask.shape[-2], mask.shape[-1], 1)
lar = np.concatenate((mask*RGB[0], mask*RGB[1],mask*RGB[2]), axis = 2)
zero_mask = zero_mask+ lar
lar_valid = zero_mask>0
masked_image = lar_valid*copyed_img
img_n = masked_image*mask_image_mix_ration + np.clip(zero_mask,0,255)*(1-mask_image_mix_ration)
max_p = img_n.max()
img_n = 255*img_n/max_p
ret = (~lar_valid*copyed_img)*mask_image_mix_ration + img_n
ret = ret.astype('uint8')
# cv2.imwrite('00020_inst.jpg', cv2.cvtColor(ret, cv2.COLOR_BGR2RGB))
return ret
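# Normalize, resize, and pad a list of BGR video frames into a single batched tensor ready
# for GLEE inference; also returns the padding/resize geometry needed for postprocessing.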
def process_frames(frame_list):
clip_images = [torch.as_tensor(np.ascontiguousarray( frame[:,:,::-1].transpose(2, 0, 1))) for frame in frame_list]
processed_frames = []
for ori_image in clip_images:
ori_image = normalizer(ori_image.to(device))[None,]
_,_, ori_height, ori_width = ori_image.shape
if inference_type == 'LSJ':
resize_image = resizer(ori_image)
image_size = torch.as_tensor((resize_image.shape[-2],resize_image.shape[-1]))
re_size = resize_image.shape[-2:]
infer_image = torch.zeros(1,3,1536,1536).to(ori_image)
infer_image[:,:,:image_size[0],:image_size[1]] = resize_image
padding_size = (1536,1536)
else:
resize_image = videoresizer(ori_image)
image_size = torch.as_tensor((resize_image.shape[-2],resize_image.shape[-1]))
re_size = resize_image.shape[-2:]
if size_divisibility > 1:
stride = size_divisibility
# the last two dims are H,W, both subject to divisibility requirement
padding_size = ((image_size + (stride - 1)).div(stride, rounding_mode="floor") * stride).tolist()
infer_image = torch.zeros(1,3,padding_size[0],padding_size[1]).to(resize_image)
infer_image[0,:,:image_size[0],:image_size[1]] = resize_image
processed_frames.append(infer_image)
return torch.cat(processed_frames,dim=0), padding_size,re_size,ori_height, ori_width # [clip_lenth,3,h,w]
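# Associate instances across frames: compute cosine similarity between current and target
# track embeddings and solve the assignment with the Hungarian algorithm (linear_sum_assignment).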
def match_from_embds(tgt_embds, cur_embds):
cur_embds = cur_embds / cur_embds.norm(dim=1)[:, None]
tgt_embds = tgt_embds / tgt_embds.norm(dim=1)[:, None]
cos_sim = torch.mm(cur_embds, tgt_embds.transpose(0,1))
cost_embd = 1 - cos_sim
C = 1.0 * cost_embd
C = C.cpu()
indices = linear_sum_assignment(C.transpose(0, 1)) # target x current
indices = indices[1] # permutation that makes current aligns to target
return indices
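# Video inference: sparsely sample frames, run GLEE clip by clip, link instances over time
# with track embeddings (text prompts), or propagate a first-frame visual prompt in VOS mode.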
def segment_video(video, prompt_mode, categoryname, custom_category, expressiong, results_select, num_inst_select, threshold_select, mask_image_mix_ration, model_selection,video_frames_select, prompter):
torch.cuda.empty_cache()
### model selection
if model_selection == 'GLEE-Plus (SwinL)':
GLEEmodel = GLEEmodel_swin
inference_type = 'resize_shot'
print('use GLEE-Plus')
clip_length = 2 #batchsize
elif model_selection == 'GLEE-Lite (R50)':
inference_type = 'resize_shot'
GLEEmodel = GLEEmodel_r50
print('use GLEE-Lite')
clip_length = 4 #batchsize
else:
GLEEmodel = GLEEmodel_eva02
print('use GLEE-Pro')
inference_type = 'LSJ'
clip_length = 1 #batchsize
# read video and get sparse frames
cap = cv2.VideoCapture(video)
video_fps = cap.get(cv2.CAP_PROP_FPS )
print('video fps:', video_fps)
frame_list = []
    frac = video_fps / 30
    frame_count = 0
    read_fps = 10
    interval = max(1, int(frac * (30 / read_fps)))  # sample roughly read_fps frames per second; never below 1
while cap.isOpened():
ret, frame = cap.read()
frame_count += 1
# if frame is read correctly ret is True
if not ret:
print("Can't receive frame (stream end?). Exiting ...")
break
if frame_count % int(interval) == 0:
frame_list.append(frame)
cap.release()
first_frame = frame_list[0]
frame_list = frame_list[:video_frames_select] # max num of frames
print('num frames:', len(frame_list))
video_len = len(frame_list)
if prompt_mode == 'categories' or prompt_mode == 'expression':
if len(results_select)==0:
results_select=['box']
if prompt_mode == 'categories':
if categoryname =="COCO-80":
batch_category_name = coco_class_name
elif categoryname =="YTBVIS&OVIS":
batch_category_name = YTBVISOVIS_class_name
elif categoryname =="OBJ365":
batch_category_name = OBJ365_class_names
elif categoryname =="Custom-List":
batch_category_name = custom_category.split(',')
else:
batch_category_name = class_agnostic_name
task = 'coco'
prompt_list = []
topK_instance = num_inst_select
prompt_mode = 'categories'
results_select = ['mask', 'score', 'box', 'name']
else:
topK_instance = 1
initprompt_list = {'grounding':[expressiong]}
task = 'grounding'
batch_category_name = []
#split long video into clips to form a batch input
num_clips = math.ceil(video_len/clip_length)
logits_list, boxes_list, embed_list, masks_list = [], [], [], []
for c in range(num_clips):
start_idx = c*clip_length
end_idx = (c+1)*clip_length
clip_inputs = frame_list[start_idx:end_idx]
clip_images, padding_size,re_size,ori_height, ori_width = process_frames(clip_inputs)
if task=='grounding':
prompt_list = {'grounding': initprompt_list['grounding']*len(clip_images)}
with torch.no_grad():
(clip_output,_) = GLEEmodel(clip_images, prompt_list, task=task, batch_name_list=batch_category_name, is_train=False)
logits_list.append(clip_output['pred_logits'].detach())
boxes_list.append(clip_output['pred_boxes'].detach())
embed_list.append(clip_output['pred_track_embed'].detach())
masks_list.append(clip_output['pred_masks'].detach()) #.to(self.merge_device)
del clip_output
torch.cuda.empty_cache()
outputs = {
'pred_logits':torch.cat(logits_list,dim=0),
'pred_track_embed':torch.cat(embed_list,dim=0),
'pred_masks':torch.cat(masks_list,dim=0),
'pred_boxes': torch.cat(boxes_list,dim=0),
}
pred_logits = list(torch.unbind(outputs['pred_logits']))
pred_masks = list(torch.unbind(outputs['pred_masks']))
pred_embds = list(torch.unbind(outputs['pred_track_embed']))
pred_boxes = list(torch.unbind(outputs['pred_boxes']))
del outputs
out_logits = []
out_masks = []
out_embds = []
out_boxes = []
out_logits.append(pred_logits[0])
out_masks.append(pred_masks[0])
out_embds.append(pred_embds[0])
out_boxes.append(pred_boxes[0])
memory_embedding = out_embds[-1]
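        # Frame-by-frame association: match each frame's queries to the moving average of the
        # last few frames' embeddings, then update a score-weighted memory embedding per track.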
for i in range(1, len(pred_logits)):
# indices = self.match_from_embds(memory_embedding, pred_embds[i])
MA_embedding = torch.stack(out_embds[-5:]).mean(0)
indices = match_from_embds(MA_embedding, pred_embds[i])
out_logits.append(pred_logits[i][indices, :])
out_masks.append(pred_masks[i][indices, :, :])
out_embds.append(pred_embds[i][indices, :])
out_boxes.append(pred_boxes[i][indices, :])
score_weights = pred_logits[i][indices, :].sigmoid().max(-1)[0][:,None]
memory_embedding = (memory_embedding+pred_embds[i][indices, :]*score_weights )/(1+score_weights)
mask_cls = sum(out_logits)/len(out_logits)
scores = mask_cls.sigmoid().max(-1)[0]
scores_per_image, topk_indices = scores.topk(topK_instance, sorted=True)
valid = scores_per_image>threshold_select
topk_indices = topk_indices[valid]
scores_per_image = scores_per_image[valid]
out_logits = torch.stack(out_logits, dim=1)[topk_indices] # q numc -> q t numc
mask_pred = torch.stack(out_masks, dim=1)[topk_indices] # q h w -> numinst t h w
pred_boxes = torch.stack(out_boxes, dim=1)[topk_indices] # q 4 -> numinst t 4
perframe_score = out_logits.sigmoid().max(-1)[0].cpu().numpy()
pred_class = mask_cls[topk_indices].max(-1)[1].tolist()
boxes = LSJ_box_postprocess(pred_boxes,padding_size,re_size, ori_height,ori_width)
pred_masks = F.interpolate( mask_pred, size=(padding_size[0], padding_size[1]), mode="bilinear", align_corners=False )
pred_masks = pred_masks[:,:,:re_size[0],:re_size[1]]
pred_masks = F.interpolate( pred_masks, size=(ori_height,ori_width), mode="bilinear", align_corners=False )
pred_masks = (pred_masks>0).detach().cpu().numpy() # [numinst,t,h,w]
        output_frames = []
for frameidx, ori_frame in enumerate(frame_list):
copyed_img = ori_frame.copy()
if 'mask' in results_select:
zero_mask = np.zeros_like(copyed_img)
for nn, (mask,score) in enumerate(zip(pred_masks[:,frameidx],perframe_score[:,frameidx])):
# mask = mask.numpy()
if score<threshold_select:
continue
mask = mask.reshape(mask.shape[0], mask.shape[1], 1)
lar = np.concatenate((mask*COLORS[nn%12][0], mask*COLORS[nn%12][1], mask*COLORS[nn%12][2]), axis = 2)
zero_mask = zero_mask+ lar
lar_valid = zero_mask>0
masked_image = lar_valid*copyed_img
img_n = masked_image*mask_image_mix_ration + np.clip(zero_mask,0,1)*255*(1-mask_image_mix_ration)
max_p = img_n.max()
img_n = 255*img_n/max_p
ret = (~lar_valid*copyed_img)*mask_image_mix_ration + img_n
ret = ret.astype('uint8')
else:
ret = copyed_img
if 'box' in results_select:
line_width = max(ret.shape) /200
for nn,(classid, box, score) in enumerate(zip(pred_class,boxes[:,frameidx],perframe_score[:,frameidx])):
if score<threshold_select:
continue
x1,y1,x2,y2 = box.long().tolist()
RGB = (COLORS[nn%12][0]*255,COLORS[nn%12][1]*255,COLORS[nn%12][2]*255)
cv2.rectangle(ret, (x1,y1), (x2,y2), RGB, math.ceil(line_width) )
if prompt_mode == 'categories' or (prompt_mode == 'expression' and 'expression' in results_select ):
if prompt_mode == 'categories':
label = ''
if 'name' in results_select:
label += batch_category_name[classid]
if 'score' in results_select:
label += str(score.item())[:4]
else:
label = expressiong
if 'score' in results_select:
label += str(score.item())[:4]
if len(label)==0:
continue
height, width, _ = ret.shape
FONT = cv2.FONT_HERSHEY_COMPLEX
label_width, label_height = cv2.getTextSize(label, FONT, min(width, height) * FONT_SCALE, math.ceil(min(width, height) * THICKNESS_SCALE))[0]
cv2.rectangle(ret, (x1,y1), (x1+label_width,(y1 -label_height) - int(height * TEXT_Y_OFFSET_SCALE)), RGB, -1)
cv2.putText(
ret,
label,
(x1, y1 - int(height * TEXT_Y_OFFSET_SCALE)),
fontFace=FONT,
fontScale=min(width, height) * FONT_SCALE,
thickness=math.ceil(min(width, height) * THICKNESS_SCALE),
color=(255,255,255),
)
            output_frames.append(ret[:,:,::-1])
# ret = ret.astype('uint8')
size = (ori_width,ori_height)
print('writing video...')
output_file = "test.mp4"
writer = skvideo.io.FFmpegWriter(output_file,
inputdict={'-r': str(read_fps)},
outputdict={'-r': str(read_fps), '-vcodec': 'libx264'})
        for i in range(len(output_frames)):
            writer.writeFrame(output_frames[i])
writer.close()
# out = cv2.VideoWriter(output_file,cv2.VideoWriter_fourcc(*'avc1'), read_fps, size)
# for i in range(len(ourput_frames)):
# out.write(ourput_frames[i])
# out.release()
del out_logits, out_masks, out_embds, out_boxes, pred_masks
torch.cuda.empty_cache()
return output_file
else: # visual prompt vos
# image prompt segmentation
topK_instance = 1
copyed_img = prompter['background'][:,:,:3].copy()
ori_image = torch.as_tensor(np.ascontiguousarray( copyed_img.transpose(2, 0, 1)))
ori_image = normalizer(ori_image.to(device))[None,]
_,_, ori_height, ori_width = ori_image.shape
resize_image = videoresizer(ori_image)
image_size = torch.as_tensor((resize_image.shape[-2],resize_image.shape[-1]))
re_size = resize_image.shape[-2:]
if size_divisibility > 1:
stride = size_divisibility
# the last two dims are H,W, both subject to divisibility requirement
padding_size = ((image_size + (stride - 1)).div(stride, rounding_mode="floor") * stride).tolist()
infer_image = torch.zeros(1,3,padding_size[0],padding_size[1]).to(resize_image)
infer_image[0,:,:image_size[0],:image_size[1]] = resize_image
        prompter['layers'] = prompter['layers'][:1]  # keep only one prompt for VOS: the model segments a single object per inference
bbox_list = [scribble2box(layer) for layer in prompter['layers'] ]
visual_prompt_list = []
visual_prompt_RGB_list = []
for mask, (box,RGB) in zip(prompter['layers'], bbox_list):
if box is None:
continue
if prompt_mode=='box':
fakemask = np.zeros_like(copyed_img[:,:,0])
x1 ,y1 ,x2, y2 = box
fakemask[ y1:y2, x1:x2 ] = 1
fakemask = fakemask>0
elif prompt_mode=='point':
fakemask = np.zeros_like(copyed_img[:,:,0])
H,W = fakemask.shape
x1 ,y1 ,x2, y2 = box
center_x, center_y = (x1+x2)//2, (y1+y2)//2
fakemask[ center_y-H//40:center_y+H//40, center_x-W//40:center_x+W//40 ] = 1
fakemask = fakemask>0
elif prompt_mode=='scribble':
fakemask = mask[:,:,-1]
fakemask = fakemask>0
fakemask = torch.from_numpy(fakemask).unsqueeze(0).to(ori_image)
if inference_type == 'LSJ':
resize_fakemask = resizer(fakemask)
infer_visual_prompt = torch.zeros(1,1536,1536).to(resize_fakemask)
infer_visual_prompt[:,:image_size[0],:image_size[1]] = resize_fakemask
else:
resize_fakemask = videoresizer(fakemask)
if size_divisibility > 1:
# the last two dims are H,W, both subject to divisibility requirement
infer_visual_prompt = torch.zeros(1,padding_size[0],padding_size[1]).to(resize_fakemask)
infer_visual_prompt[:,:image_size[0],:image_size[1]] = resize_fakemask
visual_prompt_list.append( infer_visual_prompt>0 )
visual_prompt_RGB_list.append(RGB)
mask_results_list = []
for visual_prompt in visual_prompt_list:
prompt_list = {'spatial':[visual_prompt]}
with torch.no_grad():
(outputs,_) = GLEEmodel(infer_image, prompt_list, task="coco", batch_name_list=['object'], is_train=False, visual_prompt_type=prompt_mode )
mask_pred = outputs['pred_masks'][0]
mask_cls = outputs['pred_logits'][0]
boxes_pred = outputs['pred_boxes'][0]
scores = mask_cls.sigmoid().max(-1)[0]
scores_per_image, topk_indices = scores.topk(topK_instance, sorted=True)
pred_class = mask_cls[topk_indices].max(-1)[1].tolist()
pred_boxes = boxes_pred[topk_indices]
boxes = LSJ_box_postprocess(pred_boxes,padding_size,re_size, ori_height,ori_width)
mask_pred = mask_pred[topk_indices]
pred_masks = F.interpolate( mask_pred[None,], size=(padding_size[0], padding_size[1]), mode="bilinear", align_corners=False )
first_frame_mask_padding = copy.deepcopy(pred_masks.detach())
pred_masks = pred_masks[:,:,:re_size[0],:re_size[1]]
pred_masks = F.interpolate( pred_masks, size=(ori_height,ori_width), mode="bilinear", align_corners=False )
pred_masks = (pred_masks>0).detach().cpu().numpy()[0]
mask_results_list.append(pred_masks)
zero_mask = np.zeros_like(copyed_img)
for mask,RGB in zip(mask_results_list,visual_prompt_RGB_list):
mask = mask.reshape(mask.shape[-2], mask.shape[-1], 1)
lar = np.concatenate((mask*RGB[0], mask*RGB[1],mask*RGB[2]), axis = 2)
zero_mask = zero_mask+ lar
lar_valid = zero_mask>0
masked_image = lar_valid*copyed_img
img_n = masked_image*mask_image_mix_ration + np.clip(zero_mask,0,255)*(1-mask_image_mix_ration)
max_p = img_n.max()
img_n = 255*img_n/max_p
ret = (~lar_valid*copyed_img)*mask_image_mix_ration + img_n
ret = ret.astype('uint8')
# cv2.imwrite('00020_inst.jpg', cv2.cvtColor(ret, cv2.COLOR_BGR2RGB))
output_vos_results = []
output_vos_results.append(ret)
#### vos process
        checkpoints_VOS = torch.load('GLEE_vos_r50.pth', map_location='cpu')
GLEEmodel_VOS = GLEE_Model(cfg_vos, None, device, None, True).to(device)
GLEEmodel_VOS.load_state_dict(checkpoints_VOS, strict=False)
GLEEmodel_VOS.eval()
exist_obj_dict = {}
language_dict_features_dict_init = {}
language_dict_features_dict_prev = {}
point_sample_extra = {}
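        # Propagate the first-frame prompt through the video: frame 0 encodes the prompt into
        # reference features, later frames are segmented against those features, and the memory
        # is refreshed whenever the prediction score is high enough (> 0.5).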
for frame_idx in range(video_len):
score_dict = {}
if frame_idx==0:
exist_obj_dict.update({1:first_frame_mask_padding[0]>0 })
prompt_list["spatial"] = [first_frame_mask_padding[0]>0]
frame_image, padding_size,re_size,ori_height, ori_width = process_frames(frame_list[frame_idx:frame_idx+1])
with torch.no_grad():
language_dict_features_dict_init[1], point_sample_extra[1] = \
GLEEmodel_VOS.vos_step1(frame_image, prompt_list, 'ytbvos', batch_name_list=['object'], is_train= False)
language_dict_features_dict_prev[1] = copy.deepcopy(language_dict_features_dict_init[1])
score_dict[1] = 1.0
if frame_idx>0:
cur_obj_id=1
frame_image, padding_size,re_size,ori_height, ori_width = process_frames(frame_list[frame_idx:frame_idx+1])
prompt_list["spatial"] = [exist_obj_dict[cur_obj_id]]
language_dict_features_init = copy.deepcopy(language_dict_features_dict_init[cur_obj_id]) # Important
language_dict_features_prev = copy.deepcopy(language_dict_features_dict_prev[cur_obj_id]) # Important
language_dict_features_cur = {}
language_dict_features_cur["hidden"] = torch.cat([language_dict_features_init["hidden"], language_dict_features_prev["hidden"]], dim=1)
language_dict_features_cur["masks"] = torch.cat([language_dict_features_init["masks"], language_dict_features_prev["masks"]], dim=1)
                # concatenate the initial-frame prompt and the previous-frame prompt for early fusion, but only use the last frame's point-sampled features for decoder self-attention
with torch.no_grad():
frame_output,_ = GLEEmodel_VOS.vos_step2(frame_image, task='ytbvos', language_dict_features = language_dict_features_cur, \
last_extra = point_sample_extra[cur_obj_id], batch_name_list=['object'], is_train= False)
logits = frame_output['pred_scores'][0]
top_k_propose = 1
topk_values, topk_indexes = torch.topk(logits.sigmoid(), top_k_propose, dim=0)
mask_pred_result = frame_output['pred_masks'][0,topk_indexes] #[nk,1,H,W]
# pred_embeddings = frame_output['pred_track_embed'][0,topk_indexes.squeeze()] #[nk,256]
score_dict[cur_obj_id] = topk_values.item()
if score_dict[cur_obj_id] > 0.3:
mask_pred_result = F.interpolate(
mask_pred_result,
size=(padding_size[0], padding_size[1]),
mode="bilinear",
align_corners=False,
)
exist_obj_dict[cur_obj_id] = mask_pred_result[0,0]>0
mask_pred_result = mask_pred_result[:,:,:re_size[0],:re_size[1]]
mask_pred_result = F.interpolate( mask_pred_result, size=(ori_height,ori_width), mode="bilinear", align_corners=True )[0]
final_mask = mask_pred_result[0]>0
final_mask = final_mask.cpu().numpy()
copyed_img = frame_list[frame_idx]
zero_mask = np.zeros_like(copyed_img)
RGB = visual_prompt_RGB_list[0]
mask = final_mask.reshape(final_mask.shape[0], final_mask.shape[1], 1)
lar = np.concatenate((mask*RGB[2], mask*RGB[1],mask*RGB[0]), axis = 2)
zero_mask = zero_mask+ lar
lar_valid = zero_mask>0
masked_image = lar_valid*copyed_img
img_n = masked_image*mask_image_mix_ration + np.clip(zero_mask,0,255)*(1-mask_image_mix_ration)
max_p = img_n.max()
img_n = 255*img_n/max_p
ret = (~lar_valid*copyed_img)*mask_image_mix_ration + img_n
ret = ret.astype('uint8')
output_vos_results.append(ret)
if score_dict[cur_obj_id]>0.5: # update memory
prompt_list["spatial"] = [exist_obj_dict[cur_obj_id].unsqueeze(0)]
assert cur_obj_id in language_dict_features_dict_prev
with torch.no_grad():
language_dict_features_dict_prev[cur_obj_id], point_sample_extra[cur_obj_id] = \
GLEEmodel_VOS.vos_step1(frame_image, prompt_list, 'ytbvos', batch_name_list=['object'], is_train= False)
else: # add zero as mask
copyed_img = frame_list[frame_idx]
ret = copyed_img*mask_image_mix_ration
ret = ret.astype('uint8')
output_vos_results.append(ret[:,:,::-1])
size = (ori_width,ori_height)
output_file = "test.mp4"
writer = skvideo.io.FFmpegWriter(output_file,
inputdict={'-r': str(read_fps)},
outputdict={'-r': str(read_fps), '-vcodec': 'libx264'})
for i in range(len(output_vos_results)):
writer.writeFrame(output_vos_results[i])
writer.close()
# out = cv2.VideoWriter(output_file,cv2.VideoWriter_fourcc(*'avc1'), read_fps, size)
# for i in range(len(output_vos_results)):
# out.write(output_vos_results[i])
# out.release()
torch.cuda.empty_cache()
return output_file
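# Overlay the drawn visual prompts (point / box / scribble) on the input image so the user
# can preview what will be fed to the model before running segmentation.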
def visual_prompt_preview(img, prompt_mode):
copyed_img = img['background'][:,:,:3].copy()
# get bbox from scribbles in layers
bbox_list = [scribble2box(layer) for layer in img['layers'] ]
zero_mask = np.zeros_like(copyed_img)
for mask, (box,RGB) in zip(img['layers'], bbox_list):
if box is None:
continue
if prompt_mode=='box':
fakemask = np.zeros_like(copyed_img[:,:,0])
x1 ,y1 ,x2, y2 = box
fakemask[ y1:y2, x1:x2 ] = 1
fakemask = fakemask>0
elif prompt_mode=='point':
fakemask = np.zeros_like(copyed_img[:,:,0])
H,W = fakemask.shape
x1 ,y1 ,x2, y2 = box
center_x, center_y = (x1+x2)//2, (y1+y2)//2
fakemask[ center_y-H//40:center_y+H//40, center_x-W//40:center_x+W//40 ] = 1
fakemask = fakemask>0
else:
fakemask = mask[:,:,-1]
fakemask = fakemask>0
mask = fakemask.reshape(fakemask.shape[0], fakemask.shape[1], 1)
lar = np.concatenate((mask*RGB[0], mask*RGB[1],mask*RGB[2]), axis = 2)
zero_mask = zero_mask+ lar
img_n = copyed_img + np.clip(zero_mask,0,255)
max_p = img_n.max()
ret = 255*img_n/max_p
ret = ret.astype('uint8')
return ret
image_example_list = [
[
this_dir + "/Examples/000000480122.jpg",
"categories",
"COCO-80",
"",
"",
30,
],
[
this_dir + "/Examples/20231222.jpg",
"expression",
"COCO-80",
"",
"a purple star holding by a person ",
1,
],
[
this_dir + "/Examples/000000001000.jpg",
"expression",
"COCO-80",
"",
"the left boy",
1,
],
[
this_dir + "/Examples/000000001000.jpg",
"expression",
"COCO-80",
"",
"the left girl",
1,
],
[
this_dir + "/Examples/1.png",
"categories",
"Custom-List",
"manholecover, bollard, person, car, motobike",
"",
10,
],
[
this_dir + "/Examples/cat.jpg",
"categories",
"Custom-List",
"cat_eye, cat_ear, candle",
" ",
10,
],
[
this_dir + "/Examples/00000.jpg",
"categories",
"COCO-80",
"",
"",
20,
],
[
this_dir + "/Examples/000000340697.jpg",
"categories",
"COCO-80",
"",
"",
20,
],
[
this_dir + "/Examples/sa_7842964.jpg",
"categories",
"COCO-80",
"",
"",
20,
],
[
this_dir + "/Examples/1.png",
"categories",
"OBJ365",
"",
"",
50,
],
[
this_dir + "/Examples/sa_7842967.jpg",
"categories",
"COCO-80",
"",
"",
20,
],
[
this_dir + "/Examples/sa_7842976.jpg",
"categories",
"COCO-80",
"",
"",
20,
],
[
this_dir + "/Examples/sa_7842992.jpg",
"categories",
"COCO-80",
"",
"",
20,
],
[
this_dir + "/Examples/sa_7842994.jpg",
"categories",
"COCO-80",
"",
"",
20,
],
]
video_example_list = [
[
this_dir + "/Examples/bike.mp4",
"categories",
"COCO-80",
"",
"",
10,
],
[
this_dir + "/Examples/bike.mp4",
"categories",
"Custom-List",
"backpack, bollard",
"",
5,
],
[
this_dir + "/Examples/horse.mp4",
"expression",
"COCO-80",
"",
"the left horse",
10,
],
[
this_dir + "/Examples/racing_car2.mp4",
"categories",
"COCO-80",
"",
"",
15,
],
[
this_dir + "/Examples/racing_car3.mp4",
"categories",
"COCO-80",
"",
"",
15,
],
[
this_dir + "/Examples/street.mp4",
"categories",
"OBJ365",
"",
"",
15,
],
[
this_dir + "/Examples/train.mp4",
"categories",
"COCO-80",
"",
"",
15,
],
]
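# Gradio UI: an "Image task" tab and a "Video task" tab, each with prompt selection,
# visualization options, and example inputs.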
with gr.Blocks(theme=gr.themes.Default()) as demo:
# gr.Markdown('# GLEE: General Object Foundation Model for Images and Videos at Scale')
gr.HTML("<p> <img src='/file=GLEE_logo.png' aligh='center' style='float:left' width='80' > <h1 class='title is-1 publication-title'> <p style='margin-left: 20px'> GLEE: General Object Foundation Model for Images and Videos at Scale </h1> ")
    gr.Markdown(' [Paper](https://arxiv.org/abs/2312.09158) —— [Project Page](https://glee-vision.github.io) —— [Code](https://github.com/FoundationVision/GLEE) —— [Video & Demonstration](https://youtu.be/PSVhfTPx0GQ) ')
    gr.Markdown(
        'This demo showcases the functionality of GLEE. \
        Image tasks include **arbitrary-vocabulary** object detection & segmentation \
        with **any form of object name** or object description, \
        referring expression comprehension, and interactive segmentation. \
        Video tasks add object tracking on top of the image tasks.'
    )
with gr.Tab("Image task"):
with gr.Row():
with gr.Column():
img_input = gr.ImageEditor()
model_select = gr.Dropdown(
["GLEE-Lite (R50)", "GLEE-Plus (SwinL)", "GLEE-Pro (EVA02L)"], value = "GLEE-Plus (SwinL)" , multiselect=False, label="Model",
)
with gr.Row():
with gr.Column():
prompt_mode_select = gr.Radio([ "categories", "expression", "point", "scribble", "box"], label="Prompt", value= "categories" , info="What kind of prompt do you want to use?")
category_select = gr.Dropdown(
["COCO-80", "OBJ365", "Custom-List", "Class-Agnostic"], visible=True, value = "COCO-80" , multiselect=False, label="Categories", info="Choose an existing category list or class-agnostic"
)
custom_category = gr.Textbox(
label="Custom Category",
info="Input custom category list, seperate by ',' ",
lines=1,
visible=False,
value="dog, cat, car, person",
)
input_expressiong = gr.Textbox(
label="Expression",
info="Input any description of an object in the image ",
lines=1,
visible=False,
value="the red car",
)
with gr.Accordion("Text based detection usage",open=False, visible=False) as textusage:
gr.Markdown(
                    'GLEE supports three kinds of object perception: a category list, a textual description, or class-agnostic.<br />\
                    1. Select an existing category list from the "Categories" dropdown, such as COCO or OBJ365, or customize your own list.<br />\
                    2. Enter an arbitrary object name in "Custom Category", or switch the prompt to "expression" and describe the object in the "Expression" textbox (single-object detection only).<br />\
                    3. For class-agnostic mode, choose "Class-Agnostic" from the "Categories" dropdown.'
)
with gr.Group(visible=False,) as promptshow:
with gr.Accordion("Interactive segmentation usage",open=False):
gr.Markdown(
                        'For interactive segmentation:<br />\
                        1. Draw points, boxes, or scribbles on the canvas; use a separate layer for each object and add layers with the "+" button.<br />\
                        2. Point mode accepts a single point only; multiple points collapse to their centroid, so use boxes or scribbles for larger objects.<br />\
                        3. After drawing, click the "preview" button to visualize the prompt (you will need to redraw it before running segmentation); the segmentation masks follow the chosen prompt colors.'
)
image_preview_button = gr.Button("preview & redraw")
img_showbox = gr.Image(label="visual prompt area preview")
def update_component_visible(prompt,category):
if prompt in ['point', 'scribble', 'box']:
return {
category_select:gr.Dropdown(visible=False),
custom_category:gr.Textbox(visible=False),
input_expressiong: gr.Textbox(visible=False),
promptshow:gr.Group(visible=True),
textusage:gr.Accordion(visible=False),
}
elif prompt == 'categories':
if category == "Custom-List":
return {
category_select:gr.Dropdown(visible=True),
custom_category:gr.Textbox(visible=True),
input_expressiong: gr.Textbox(visible=False),
promptshow:gr.Group(visible=False),
textusage:gr.Accordion(visible=True),
}
return {
category_select:gr.Dropdown(visible=True),
custom_category:gr.Textbox(visible=False),
input_expressiong: gr.Textbox(visible=False),
promptshow:gr.Group(visible=False),
textusage:gr.Accordion(visible=True),
}
else:
return {
category_select:gr.Dropdown(visible=False),
custom_category:gr.Textbox(visible=False),
input_expressiong: gr.Textbox(visible=True),
promptshow:gr.Group(visible=False),
textusage:gr.Accordion(visible=True),
}
def update_category_showcase(category):
if category == "Custom-List":
return {
category_select:gr.Dropdown(visible=True),
custom_category:gr.Textbox(visible=True),
input_expressiong: gr.Textbox(visible=False),
promptshow:gr.Group(visible=False),
textusage:gr.Accordion(visible=True),
}
else:
return {
category_select:gr.Dropdown(visible=True),
custom_category:gr.Textbox(visible=False),
input_expressiong: gr.Textbox(visible=False),
promptshow:gr.Group(visible=False),
textusage:gr.Accordion(visible=True),
}
prompt_mode_select.input(update_component_visible,
[prompt_mode_select,category_select],
[category_select,custom_category,input_expressiong,promptshow,textusage])
category_select.input(update_category_showcase,
[category_select],
[category_select,custom_category,input_expressiong,promptshow,textusage])
# with gr.Column():
with gr.Column():
image_segment = gr.Image(label="detection and segmentation results")
with gr.Accordion("Try More Visualization Options"):
results_select = gr.CheckboxGroup(["box", "mask", "name", "score", "expression"], value=["box", "mask", "name", "score"], label="Shown Results", info="The results shown on image")
num_inst_select = gr.Slider(1, 50, value=15, step=1, label="Num of topK instances for category based detection", info="Choose between 1 and 50 for better visualization")
threshold_select = gr.Slider(0, 1, value=0.2, label="Confidence Threshold", info="Choose threshold ")
mask_image_mix_ration = gr.Slider(0, 1, value=0.65, label="Image Brightness Ratio", info="Brightness between image and colored masks ")
image_button = gr.Button("Detect & Segment")
image_preview_button.click(visual_prompt_preview, inputs = [img_input,prompt_mode_select] , outputs = img_showbox)
# img_input.change(visual_prompt_preview, inputs = [img_input,prompt_mode_select] , outputs = img_showbox)
image_button.click(segment_image, inputs=[img_input, prompt_mode_select, category_select, custom_category,input_expressiong, results_select, num_inst_select, threshold_select, mask_image_mix_ration,model_select], outputs=image_segment)
gr.Examples(
examples = image_example_list,
inputs=[img_input, prompt_mode_select, category_select, custom_category,input_expressiong,num_inst_select],
examples_per_page=20
)
with gr.Tab("Video task"):
gr.Markdown(
        '#### Gradio only supports .mp4 for HTML display. \
        Due to computing resource restrictions, the input video is sampled and played at 10 fps, and a single video is limited (cropped) to 10 seconds.'
)
with gr.Row():
with gr.Column(): # video input face
video_input = gr.Video(label="Input Video", interactive=True, sources=['upload'])
video_model_select = gr.Dropdown(
["GLEE-Lite (R50)", "GLEE-Plus (SwinL)"], value = "GLEE-Lite (R50)" , multiselect=False, label="Model",
)
with gr.Row():
with gr.Column():
video_prompt_mode_select = gr.Radio([ "categories", "expression", "point", "scribble", "box"], label="Prompt", value= "categories" , info="What kind of prompt do you want to use?")
video_category_select = gr.Dropdown(
["YTBVIS&OVIS", "COCO-80", "OBJ365", "Custom-List", "Class-Agnostic"], visible=True, value = "COCO-80" , multiselect=False, label="Categories", info="Choose an existing category list or class-agnostic"
)
video_custom_category = gr.Textbox(
label="Custom Category",
info="Input custom category list, seperate by ',' ",
lines=1,
visible=False,
value="dog, cat, car, person",
)
video_input_expressiong = gr.Textbox(
label="Expression",
info="Input any description of an object in the image ",
lines=2,
visible=False,
value="the red car",
)
with gr.Accordion("Text based detection usage",open=False, visible=False) as video_textusage:
gr.Markdown(
                    'GLEE supports three kinds of object perception: a category list, a textual description, or class-agnostic.<br />\
                    1. Select an existing category list from the "Categories" dropdown, such as COCO or OBJ365, or customize your own list.<br />\
                    2. Enter an arbitrary object name in "Custom Category", or switch the prompt to "expression" and describe the object in the "Expression" textbox (single-object detection only).<br />\
                    3. For class-agnostic mode, choose "Class-Agnostic" from the "Categories" dropdown.'
)
with gr.Group(visible=False,) as video_promptshow:
with gr.Accordion("Interactive segmentation usage",open=False):
gr.Markdown(
                        'For video interactive segmentation, draw a prompt on the first frame:<br />\
                        1. Draw a point, box, or scribble on the canvas; interactive mode supports tracking only one object.<br />\
                        2. Point mode accepts a single point only; multiple points collapse to their centroid, so use boxes or scribbles for larger objects.<br />\
                        3. After drawing, click "Preview" to visualize the prompt; the segmentation mask follows the chosen prompt color.'
)
with gr.Row():
video_visual_prompter = gr.ImageEditor(label="visual prompter", show_label=True ,sources=['clipboard'])
video_img_showbox = gr.Image(label="visual prompt area preview")
video_prompt_preview = gr.Button("Preview")
def update_video_component_visible(prompt,category, video):
if prompt in ['point', 'scribble', 'box']:
if video is None:
return {
video_category_select:gr.Dropdown(visible=False),
video_custom_category:gr.Textbox(visible=False),
video_input_expressiong: gr.Textbox(visible=False),
video_promptshow:gr.Group(visible=True),
video_textusage:gr.Accordion(visible=False),}
else:
cap = cv2.VideoCapture(video)
ret, frame = cap.read()
frame = frame[:,:,::-1].astype('uint8')
zerolayers = np.zeros((frame.shape[0],frame.shape[1],1)).astype('uint8')
alpha = 255+zerolayers
newframe = np.concatenate((frame,alpha),axis=2)
cap.release()
return {
video_category_select:gr.Dropdown(visible=False),
video_custom_category:gr.Textbox(visible=False),
video_input_expressiong: gr.Textbox(visible=False),
video_promptshow:gr.Group(visible=True),
video_textusage:gr.Accordion(visible=False),
video_visual_prompter:gr.ImageEditor(value= {
'background':newframe,
'layers':[ ],
'composite':newframe }),
}
elif prompt == 'categories':
if category == "Custom-List":
return {
video_category_select:gr.Dropdown(visible=True),
video_custom_category:gr.Textbox(visible=True),
video_input_expressiong: gr.Textbox(visible=False),
video_promptshow:gr.Group(visible=False),
video_textusage:gr.Accordion(visible=True),
}
return {
video_category_select:gr.Dropdown(visible=True),
video_custom_category:gr.Textbox(visible=False),
video_input_expressiong: gr.Textbox(visible=False),
video_promptshow:gr.Group(visible=False),
video_textusage:gr.Accordion(visible=True),
}
else:
return {
video_category_select:gr.Dropdown(visible=False),
video_custom_category:gr.Textbox(visible=False),
video_input_expressiong: gr.Textbox(visible=True),
video_promptshow:gr.Group(visible=False),
video_textusage:gr.Accordion(visible=True),
}
def update_video_category_showcase(category):
if category == "Custom-List":
return {
video_category_select:gr.Dropdown(visible=True),
video_custom_category:gr.Textbox(visible=True),
video_input_expressiong: gr.Textbox(visible=False),
video_promptshow:gr.Group(visible=False),
video_textusage:gr.Accordion(visible=True),
}
else:
return {
video_category_select:gr.Dropdown(visible=True),
video_custom_category:gr.Textbox(visible=False),
video_input_expressiong: gr.Textbox(visible=False),
video_promptshow:gr.Group(visible=False),
video_textusage:gr.Accordion(visible=True),
}
video_prompt_mode_select.input(update_video_component_visible,
[video_prompt_mode_select,video_category_select,video_input],
[video_category_select,video_custom_category,video_input_expressiong,video_promptshow,video_textusage,video_visual_prompter])
video_category_select.input(update_video_category_showcase,
[video_category_select],
[video_category_select,video_custom_category,video_input_expressiong,video_promptshow,video_textusage])
video_input.change(update_video_component_visible,
[video_prompt_mode_select,video_category_select,video_input],
[video_category_select,video_custom_category,video_input_expressiong,video_promptshow,video_textusage,video_visual_prompter])
with gr.Column():
video_output = gr.Video(label="Video Results")
with gr.Accordion("Try More Visualization Options"):
                    video_frames_select = gr.Slider(1, 100, value=32, step=1, label="Max frames", info="The max number of video frames; select fewer frames to reduce the waiting time and check the effect quickly")
                    video_results_select = gr.CheckboxGroup(["box", "mask", "name", "score", "expression"], value=["box", "mask", "name", "score", "expression"], label="Shown Results", info="The results shown on image")
                    video_num_inst_select = gr.Slider(1, 30, value=10, step=1, label="Num of topK instances for category based detection", info="Choose between 1 and 30 for better visualization")
video_threshold_select = gr.Slider(0, 1, value=0.2, label="Confidence Threshold", info="Choose threshold ")
video_mask_image_mix_ration = gr.Slider(0, 1, value=0.65, label="Image Brightness Ratio", info="Brightness between image and colored masks ")
video_prompt_preview.click(visual_prompt_preview, inputs = [video_visual_prompter,video_prompt_mode_select] , outputs = video_img_showbox)
video_button = gr.Button("Segment&Track")
video_button.click(segment_video, inputs=[video_input, video_prompt_mode_select, video_category_select, video_custom_category, video_input_expressiong, video_results_select, video_num_inst_select, video_threshold_select, video_mask_image_mix_ration, video_model_select, video_frames_select, video_visual_prompter], outputs=video_output)
gr.Examples(
examples = video_example_list,
inputs=[video_input, video_prompt_mode_select, video_category_select, video_custom_category, video_input_expressiong,video_num_inst_select],
examples_per_page=20
)
if __name__ == '__main__':
demo.launch(inbrowser=True, allowed_paths=["./"])