|
# File: segment-anything-2-coreml-conversion/coreml/export.py |
|
import argparse |
|
import os |
|
import enum |
|
from typing import List, Optional, Tuple |
|
import ast |
|
import torch |
|
import numpy as np |
|
from PIL import Image |
|
from PIL.Image import Resampling |
|
import coremltools as ct |
|
from coremltools.converters.mil._deployment_compatibility import AvailableTarget |
|
from coremltools import ComputeUnit |
|
from coremltools.converters.mil.mil.passes.defs.quantization import ComputePrecision |
|
from coremltools.converters.mil import register_torch_op |
|
from coremltools.converters.mil.mil import Builder as mb |
|
from sam2.sam2_image_predictor import SAM2ImagePredictor |
|
|
|
class SAM2Variant(enum.Enum): |
|
Tiny = 'tiny' |
|
Small = 'small' |
|
BasePlus = 'base-plus' |
|
Large = 'large' |
|
|
|
def fmt(self): |
|
if self == SAM2Variant.BasePlus: |
|
return 'BasePlus' |
|
return self.value.capitalize() |
|
SAM2_HW = (1024, 1024) |
|
|
|
def parse_args(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: |
|
parser.add_argument('--output-dir', type=str, default='.', help='Provide location to save exported models.') |
|
parser.add_argument('--variant', type=lambda x: getattr(SAM2Variant, x), choices=[variant for variant in SAM2Variant], default=SAM2Variant.Small, help='SAM2 variant to export.') |
|
parser.add_argument('--points', type=str, help="List of 2D points, e.g., '[[10,20], [30,40]]'") |
|
parser.add_argument('--boxes', type=str, help="List of 2D bounding boxes, e.g., '[[10,20,30,40], [50,60,70,80]]'") |
|
parser.add_argument('--labels', type=str, help='List of binary labels for each points entry, denoting foreground (1) or background (0).') |
|
parser.add_argument('--min-deployment-target', type=lambda x: getattr(AvailableTarget, x), choices=[target for target in AvailableTarget], default=AvailableTarget.iOS17, help='Minimum deployment target for CoreML model.') |
|
parser.add_argument('--compute-units', type=lambda x: getattr(ComputeUnit, x), choices=[cu for cu in ComputeUnit], default=ComputeUnit.ALL, help='Which compute units to target for CoreML model.') |
|
parser.add_argument('--precision', type=lambda x: getattr(ComputePrecision, x), choices=[p for p in ComputePrecision], default=ComputePrecision.FLOAT16, help='Precision to use for quantization.') |
|
return parser |
|
|
|
@register_torch_op
def upsample_bicubic2d(context, node):
    # Torch's bicubic upsampling is mapped onto Core ML's bilinear upsampling here,
    # presumably because no matching bicubic op is exposed by this conversion path;
    # the converted model therefore approximates bicubic interpolation.
    x = context[node.inputs[0]]
    output_size = context[node.inputs[1]].val
    scale_factor_height = output_size[0] / x.shape[2]
    scale_factor_width = output_size[1] / x.shape[3]
    align_corners = context[node.inputs[2]].val
    x = mb.upsample_bilinear(x=x, scale_factor_height=scale_factor_height, scale_factor_width=scale_factor_width, align_corners=align_corners, name=node.name)
    context.add(x)
|
|
|
class SAM2ImageEncoder(torch.nn.Module): |
|
|
|
def __init__(self, model: SAM2ImagePredictor): |
|
super().__init__() |
|
self.model = model |
|
|
|
@torch.no_grad() |
|
def forward(self, image): |
|
(img_embedding, feats_s0, feats_s1) = self.model.encode_image_raw(image) |
|
return (img_embedding, feats_s0, feats_s1) |
|
|
|
def validate_image_encoder(model: ct.models.MLModel, ground_model: SAM2ImagePredictor, image: Image.Image): |
|
prepared_image = image.resize(SAM2_HW, Resampling.BILINEAR) |
|
predictions = model.predict({'image': prepared_image}) |
|
image = np.array(image.convert('RGB')) |
|
tch_image = ground_model._transforms(image) |
|
tch_image = tch_image[None, ...].to('cpu') |
|
(ground_embedding, ground_feats_s0, ground_feats_s1) = ground_model.encode_image_raw(tch_image) |
|
(ground_embedding, ground_feats_s0, ground_feats_s1) = (ground_embedding.numpy(), ground_feats_s0.numpy(), ground_feats_s1.numpy()) |
|
img_max_diff = np.max(np.abs(predictions['image_embedding'] - ground_embedding)) |
|
img_avg_diff = np.mean(np.abs(predictions['image_embedding'] - ground_embedding)) |
|
s0_max_diff = np.max(np.abs(predictions['feats_s0'] - ground_feats_s0)) |
|
s0_avg_diff = np.mean(np.abs(predictions['feats_s0'] - ground_feats_s0)) |
|
s1_max_diff = np.max(np.abs(predictions['feats_s1'] - ground_feats_s1)) |
|
s1_avg_diff = np.mean(np.abs(predictions['feats_s1'] - ground_feats_s1)) |
|
print(f'Image Embedding: Max Diff: {img_max_diff:.4f}, Avg Diff: {img_avg_diff:.4f}') |
|
print(f'Feats S0: Max Diff: {s0_max_diff:.4f}, Avg Diff: {s0_avg_diff:.4f}') |
|
print(f'Feats S1: Max Diff: {s1_max_diff:.4f}, Avg Diff: {s1_avg_diff:.4f}') |
|
|
|
def validate_prompt_encoder(model: ct.models.MLModel, ground_model: SAM2ImagePredictor, unnorm_coords, labels): |
|
predictions = model.predict({'points': unnorm_coords, 'labels': labels}) |
|
(ground_sparse, ground_dense) = ground_model.encode_points_raw(unnorm_coords, labels) |
|
ground_sparse = ground_sparse.numpy() |
|
ground_dense = ground_dense.numpy() |
|
sparse_max_diff = np.max(np.abs(predictions['sparse_embeddings'] - ground_sparse)) |
|
sparse_avg_diff = np.mean(np.abs(predictions['sparse_embeddings'] - ground_sparse)) |
|
dense_max_diff = np.max(np.abs(predictions['dense_embeddings'] - ground_dense)) |
|
dense_avg_diff = np.mean(np.abs(predictions['dense_embeddings'] - ground_dense)) |
|
print('Sparse Embeddings: Max Diff: {:.4f}, Avg Diff: {:.4f}'.format(sparse_max_diff, sparse_avg_diff)) |
|
print('Dense Embeddings: Max Diff: {:.4f}, Avg Diff: {:.4f}'.format(dense_max_diff, dense_avg_diff)) |
|
assert np.allclose(predictions['sparse_embeddings'], ground_sparse, atol=0.009) |
|
assert np.allclose(predictions['dense_embeddings'], ground_dense, atol=0.001) |
|
|
|
def validate_mask_decoder(model: ct.models.MLModel, ground_model: SAM2ImagePredictor, image_embedding, sparse_embedding, dense_embedding, feats_s0, feats_s1, precision: ComputePrecision): |
|
predictions = model.predict({'image_embedding': image_embedding, 'sparse_embedding': sparse_embedding, 'dense_embedding': dense_embedding, 'feats_s0': feats_s0, 'feats_s1': feats_s1}) |
|
(ground_masks, scores) = ground_model.decode_masks_raw(image_embedding, sparse_embedding, dense_embedding, [feats_s0, feats_s1]) |
|
ground_masks = ground_masks.numpy() |
|
masks_max_diff = np.max(np.abs(predictions['low_res_masks'] - ground_masks)) |
|
masks_avg_diff = np.mean(np.abs(predictions['low_res_masks'] - ground_masks)) |
|
print('Masks: Max Diff: {:.4f}, Avg Diff: {:.4f}'.format(masks_max_diff, masks_avg_diff)) |
|
atol = 0.07 if precision == ComputePrecision.FLOAT32 else 0.3 |
|
assert np.allclose(predictions['low_res_masks'], ground_masks, atol=atol) |
|
print(f"Scores: {predictions['scores']}, ground: {scores}") |
|
|
|
class SAM2PointsEncoder(torch.nn.Module): |
|
|
|
def __init__(self, model: SAM2ImagePredictor): |
|
super().__init__() |
|
self.model = model |
|
|
|
@torch.no_grad() |
|
def forward(self, points, labels): |
|
prompt_embedding = self.model.encode_points_raw(points, labels) |
|
return prompt_embedding |
|
|
|
class SAM2MaskDecoder(torch.nn.Module): |
|
|
|
def __init__(self, model: SAM2ImagePredictor): |
|
super().__init__() |
|
self.model = model |
|
|
|
@torch.no_grad() |
|
def forward(self, image_embedding, sparse_embedding, dense_embedding, feats_s0, feats_s1): |
|
(low_res_masks, iou_scores) = self.model.decode_masks_raw(image_embedding, sparse_embedding, dense_embedding, [feats_s0, feats_s1]) |
|
return (low_res_masks, iou_scores) |
|
|
|
def export_image_encoder(image_predictor: SAM2ImagePredictor, variant: SAM2Variant, output_dir: str, min_target: AvailableTarget, compute_units: ComputeUnit, precision: ComputePrecision) -> Tuple[int, int]: |
|
image = Image.open('../notebooks/images/truck.jpg') |
|
image = np.array(image.convert('RGB')) |
|
orig_hw = (image.shape[0], image.shape[1]) |
|
prepared_image = image_predictor._transforms(image) |
|
prepared_image = prepared_image[None, ...].to('cpu') |
|
traced_model = torch.jit.trace(SAM2ImageEncoder(image_predictor).eval(), prepared_image) |
|
scale = 1 / (0.226 * 255.0) |
|
bias = [-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225] |
|
mlmodel = ct.convert(traced_model, inputs=[ct.ImageType(name='image', shape=(1, 3, SAM2_HW[0], SAM2_HW[1]), scale=scale, bias=bias)], outputs=[ct.TensorType(name='image_embedding'), ct.TensorType(name='feats_s0'), ct.TensorType(name='feats_s1')], minimum_deployment_target=min_target, compute_units=compute_units, compute_precision=precision) |
|
image = Image.open('../notebooks/images/truck.jpg') |
|
validate_image_encoder(mlmodel, image_predictor, image) |
|
output_path = os.path.join(output_dir, f'SAM2{variant.fmt()}ImageEncoder{precision.value.upper()}') |
|
mlmodel.save(output_path + '.mlpackage') |
|
return orig_hw |
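
# Loading sketch for the exported encoder package (illustrative, not part of the original
# script; the file name depends on the chosen variant and precision):
#
#   encoder = ct.models.MLModel('SAM2SmallImageEncoderFLOAT16.mlpackage')
#   pil_image = Image.open('../notebooks/images/truck.jpg').resize(SAM2_HW, Resampling.BILINEAR)
#   out = encoder.predict({'image': pil_image})
#   out['image_embedding'].shape  # (1, 256, 64, 64); 'feats_s0' and 'feats_s1' are also returned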
|
|
|
def export_points_prompt_encoder(image_predictor: SAM2ImagePredictor, variant: SAM2Variant, input_points: List[List[float]], input_labels: List[int], orig_hw: tuple, output_dir: str, min_target: AvailableTarget, compute_units: ComputeUnit, precision: ComputePrecision): |
|
image_predictor.model.sam_prompt_encoder.eval() |
|
points = torch.tensor(input_points, dtype=torch.float32) |
|
labels = torch.tensor(input_labels, dtype=torch.int32) |
|
unnorm_coords = image_predictor._transforms.transform_coords(points, normalize=True, orig_hw=orig_hw) |
|
(unnorm_coords, labels) = (unnorm_coords[None, ...], labels[None, ...]) |
|
traced_model = torch.jit.trace(SAM2PointsEncoder(image_predictor), (unnorm_coords, labels)) |
|
points_shape = ct.Shape(shape=(1, ct.RangeDim(lower_bound=1, upper_bound=16), 2)) |
|
labels_shape = ct.Shape(shape=(1, ct.RangeDim(lower_bound=1, upper_bound=16))) |
|
mlmodel = ct.convert(traced_model, inputs=[ct.TensorType(name='points', shape=points_shape), ct.TensorType(name='labels', shape=labels_shape)], outputs=[ct.TensorType(name='sparse_embeddings'), ct.TensorType(name='dense_embeddings')], minimum_deployment_target=min_target, compute_units=compute_units, compute_precision=precision) |
|
validate_prompt_encoder(mlmodel, image_predictor, unnorm_coords, labels) |
|
output_path = os.path.join(output_dir, f'SAM2{variant.fmt()}PromptEncoder{precision.value.upper()}') |
|
mlmodel.save(output_path + '.mlpackage') |
|
|
|
def export_mask_decoder(image_predictor: SAM2ImagePredictor, variant: SAM2Variant, output_dir: str, min_target: AvailableTarget, compute_units: ComputeUnit, precision: ComputePrecision): |
|
image_predictor.model.sam_mask_decoder.eval() |
|
s0 = torch.randn(1, 32, 256, 256) |
|
s1 = torch.randn(1, 64, 128, 128) |
|
image_embedding = torch.randn(1, 256, 64, 64) |
|
sparse_embedding = torch.randn(1, 3, 256) |
|
dense_embedding = torch.randn(1, 256, 64, 64) |
|
traced_model = torch.jit.trace(SAM2MaskDecoder(image_predictor), (image_embedding, sparse_embedding, dense_embedding, s0, s1)) |
|
traced_model.eval() |
|
mlmodel = ct.convert(traced_model, inputs=[ct.TensorType(name='image_embedding', shape=[1, 256, 64, 64]), ct.TensorType(name='sparse_embedding', shape=ct.EnumeratedShapes(shapes=[[1, i, 256] for i in range(2, 16)])), ct.TensorType(name='dense_embedding', shape=[1, 256, 64, 64]), ct.TensorType(name='feats_s0', shape=[1, 32, 256, 256]), ct.TensorType(name='feats_s1', shape=[1, 64, 128, 128])], outputs=[ct.TensorType(name='low_res_masks'), ct.TensorType(name='scores')], minimum_deployment_target=min_target, compute_units=compute_units, compute_precision=precision) |
|
validate_mask_decoder(mlmodel, image_predictor, image_embedding, sparse_embedding, dense_embedding, s0, s1, precision) |
|
output_path = os.path.join(output_dir, f'SAM2{variant.fmt()}MaskDecoder{precision.value.upper()}') |
|
mlmodel.save(output_path + '.mlpackage') |
|
Point = Tuple[float, float] |
|
Box = Tuple[float, float, float, float] |
|
|
|
def export(output_dir: str, variant: SAM2Variant, points: Optional[List[Point]], boxes: Optional[List[Box]], labels: Optional[List[int]], min_target: AvailableTarget, compute_units: ComputeUnit, precision: ComputePrecision): |
|
os.makedirs(output_dir, exist_ok=True) |
|
device = torch.device('cpu') |
|
sam2_checkpoint = f'facebook/sam2-hiera-{variant.value}' |
|
with torch.no_grad(): |
|
img_predictor = SAM2ImagePredictor.from_pretrained(sam2_checkpoint, device=device) |
|
img_predictor.model.eval() |
|
orig_hw = export_image_encoder(img_predictor, variant, output_dir, min_target, compute_units, precision) |
|
        if boxes is not None:
            raise ValueError('Boxes are not supported yet')
        if points is None or labels is None:
            raise ValueError('Exporting the prompt encoder requires --points and --labels')
        export_points_prompt_encoder(img_predictor, variant, points, labels, orig_hw, output_dir, min_target, compute_units, precision)
|
export_mask_decoder(img_predictor, variant, output_dir, min_target, compute_units, precision) |
|
if __name__ == '__main__': |
|
parser = argparse.ArgumentParser(description='SAM2 -> CoreML CLI') |
|
parser = parse_args(parser) |
|
args = parser.parse_args() |
|
(points, boxes, labels) = (None, None, None) |
|
if args.points: |
|
points = [tuple(p) for p in ast.literal_eval(args.points)] |
|
if args.boxes: |
|
boxes = [tuple(b) for b in ast.literal_eval(args.boxes)] |
|
if args.labels: |
|
labels = ast.literal_eval(args.labels) |
|
if boxes and points: |
|
raise ValueError('Cannot provide both points and boxes') |
|
if points: |
|
if not isinstance(points, list) or not all((isinstance(p, tuple) and len(p) == 2 for p in points)): |
|
            raise ValueError('Points must be a list of (x, y) point tuples')
|
if labels: |
|
if not isinstance(labels, list) or not all((isinstance(l, int) and l in [0, 1] for l in labels)): |
|
raise ValueError('Labels must denote foreground (1) or background (0)') |
|
if points: |
|
        if labels is None or len(points) != len(labels):
|
raise ValueError('Number of points must match the number of labels') |
|
if len(points) > 16: |
|
raise ValueError('Number of points must be less than or equal to 16') |
|
if boxes: |
|
if not isinstance(boxes, list) or not all((isinstance(b, tuple) and len(b) == 4 for b in boxes)): |
|
            raise ValueError('Boxes must be a list of 4-element bounding box tuples')
|
export(args.output_dir, args.variant, points, boxes, labels, args.min_deployment_target, args.compute_units, args.precision) |
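
# Example invocation (a usage sketch, not part of the original script; prompt values are
# illustrative, and the encoder export loads ../notebooks/images/truck.jpg for validation,
# so that relative path must resolve from the working directory):
#
#   python export.py --variant Large --points "[[400, 500]]" --labels "[1]" \
#       --min-deployment-target iOS17 --compute-units ALL --precision FLOAT16 --output-dir ./models
#
# This writes three packages into --output-dir, e.g. SAM2LargeImageEncoderFLOAT16.mlpackage,
# SAM2LargePromptEncoderFLOAT16.mlpackage and SAM2LargeMaskDecoderFLOAT16.mlpackage.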
|
|
|
# File: segment-anything-2-coreml-conversion/sam2/automatic_mask_generator.py |
|
from typing import Any, Dict, List, Optional, Tuple |
|
import numpy as np |
|
import torch |
|
from torchvision.ops.boxes import batched_nms, box_area |
|
from sam2.modeling.sam2_base import SAM2Base |
|
from sam2.sam2_image_predictor import SAM2ImagePredictor |
|
from sam2.utils.amg import area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, MaskData, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points |
|
|
|
class SAM2AutomaticMaskGenerator: |
|
|
|
def __init__(self, model: SAM2Base, points_per_side: Optional[int]=32, points_per_batch: int=64, pred_iou_thresh: float=0.8, stability_score_thresh: float=0.95, stability_score_offset: float=1.0, mask_threshold: float=0.0, box_nms_thresh: float=0.7, crop_n_layers: int=0, crop_nms_thresh: float=0.7, crop_overlap_ratio: float=512 / 1500, crop_n_points_downscale_factor: int=1, point_grids: Optional[List[np.ndarray]]=None, min_mask_region_area: int=0, output_mode: str='binary_mask', use_m2m: bool=False, multimask_output: bool=True, **kwargs) -> None: |
|
        assert (points_per_side is None) != (point_grids is None), 'Exactly one of points_per_side or point_grids must be provided.'
|
if points_per_side is not None: |
|
self.point_grids = build_all_layer_point_grids(points_per_side, crop_n_layers, crop_n_points_downscale_factor) |
|
elif point_grids is not None: |
|
self.point_grids = point_grids |
|
else: |
|
            raise ValueError("Can't have both points_per_side and point_grids be None.")
|
assert output_mode in ['binary_mask', 'uncompressed_rle', 'coco_rle'], f'Unknown output_mode {output_mode}.' |
|
if output_mode == 'coco_rle': |
|
try: |
|
from pycocotools import mask as mask_utils |
|
except ImportError as e: |
|
print('Please install pycocotools') |
|
raise e |
|
self.predictor = SAM2ImagePredictor(model, max_hole_area=min_mask_region_area, max_sprinkle_area=min_mask_region_area) |
|
self.points_per_batch = points_per_batch |
|
self.pred_iou_thresh = pred_iou_thresh |
|
self.stability_score_thresh = stability_score_thresh |
|
self.stability_score_offset = stability_score_offset |
|
self.mask_threshold = mask_threshold |
|
self.box_nms_thresh = box_nms_thresh |
|
self.crop_n_layers = crop_n_layers |
|
self.crop_nms_thresh = crop_nms_thresh |
|
self.crop_overlap_ratio = crop_overlap_ratio |
|
self.crop_n_points_downscale_factor = crop_n_points_downscale_factor |
|
self.min_mask_region_area = min_mask_region_area |
|
self.output_mode = output_mode |
|
self.use_m2m = use_m2m |
|
self.multimask_output = multimask_output |
|
|
|
@classmethod |
|
def from_pretrained(cls, model_id: str, **kwargs) -> 'SAM2AutomaticMaskGenerator': |
|
from sam2.build_sam import build_sam2_hf |
|
sam_model = build_sam2_hf(model_id, **kwargs) |
|
return cls(sam_model, **kwargs) |
|
|
|
@torch.no_grad() |
|
def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: |
|
mask_data = self._generate_masks(image) |
|
if self.output_mode == 'coco_rle': |
|
mask_data['segmentations'] = [coco_encode_rle(rle) for rle in mask_data['rles']] |
|
elif self.output_mode == 'binary_mask': |
|
mask_data['segmentations'] = [rle_to_mask(rle) for rle in mask_data['rles']] |
|
else: |
|
mask_data['segmentations'] = mask_data['rles'] |
|
curr_anns = [] |
|
for idx in range(len(mask_data['segmentations'])): |
|
ann = {'segmentation': mask_data['segmentations'][idx], 'area': area_from_rle(mask_data['rles'][idx]), 'bbox': box_xyxy_to_xywh(mask_data['boxes'][idx]).tolist(), 'predicted_iou': mask_data['iou_preds'][idx].item(), 'point_coords': [mask_data['points'][idx].tolist()], 'stability_score': mask_data['stability_score'][idx].item(), 'crop_box': box_xyxy_to_xywh(mask_data['crop_boxes'][idx]).tolist()} |
|
curr_anns.append(ann) |
|
return curr_anns |
|
|
|
def _generate_masks(self, image: np.ndarray) -> MaskData: |
|
orig_size = image.shape[:2] |
|
(crop_boxes, layer_idxs) = generate_crop_boxes(orig_size, self.crop_n_layers, self.crop_overlap_ratio) |
|
data = MaskData() |
|
for (crop_box, layer_idx) in zip(crop_boxes, layer_idxs): |
|
crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) |
|
data.cat(crop_data) |
|
if len(crop_boxes) > 1: |
|
scores = 1 / box_area(data['crop_boxes']) |
|
scores = scores.to(data['boxes'].device) |
|
keep_by_nms = batched_nms(data['boxes'].float(), scores, torch.zeros_like(data['boxes'][:, 0]), iou_threshold=self.crop_nms_thresh) |
|
data.filter(keep_by_nms) |
|
data.to_numpy() |
|
return data |
|
|
|
def _process_crop(self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...]) -> MaskData: |
|
(x0, y0, x1, y1) = crop_box |
|
cropped_im = image[y0:y1, x0:x1, :] |
|
cropped_im_size = cropped_im.shape[:2] |
|
self.predictor.set_image(cropped_im) |
|
points_scale = np.array(cropped_im_size)[None, ::-1] |
|
points_for_image = self.point_grids[crop_layer_idx] * points_scale |
|
data = MaskData() |
|
for (points,) in batch_iterator(self.points_per_batch, points_for_image): |
|
batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size, normalize=True) |
|
data.cat(batch_data) |
|
del batch_data |
|
self.predictor.reset_predictor() |
|
keep_by_nms = batched_nms(data['boxes'].float(), data['iou_preds'], torch.zeros_like(data['boxes'][:, 0]), iou_threshold=self.box_nms_thresh) |
|
data.filter(keep_by_nms) |
|
data['boxes'] = uncrop_boxes_xyxy(data['boxes'], crop_box) |
|
data['points'] = uncrop_points(data['points'], crop_box) |
|
data['crop_boxes'] = torch.tensor([crop_box for _ in range(len(data['rles']))]) |
|
return data |
|
|
|
def _process_batch(self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], normalize=False) -> MaskData: |
|
(orig_h, orig_w) = orig_size |
|
points = torch.as_tensor(points, dtype=torch.float32, device=self.predictor.device) |
|
in_points = self.predictor._transforms.transform_coords(points, normalize=normalize, orig_hw=im_size) |
|
in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) |
|
(masks, iou_preds, low_res_masks) = self.predictor._predict(in_points[:, None, :], in_labels[:, None], multimask_output=self.multimask_output, return_logits=True) |
|
data = MaskData(masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=points.repeat_interleave(masks.shape[1], dim=0), low_res_masks=low_res_masks.flatten(0, 1)) |
|
del masks |
|
if not self.use_m2m: |
|
if self.pred_iou_thresh > 0.0: |
|
keep_mask = data['iou_preds'] > self.pred_iou_thresh |
|
data.filter(keep_mask) |
|
data['stability_score'] = calculate_stability_score(data['masks'], self.mask_threshold, self.stability_score_offset) |
|
if self.stability_score_thresh > 0.0: |
|
keep_mask = data['stability_score'] >= self.stability_score_thresh |
|
data.filter(keep_mask) |
|
else: |
|
in_points = self.predictor._transforms.transform_coords(data['points'], normalize=normalize, orig_hw=im_size) |
|
labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) |
|
(masks, ious) = self.refine_with_m2m(in_points, labels, data['low_res_masks'], self.points_per_batch) |
|
data['masks'] = masks.squeeze(1) |
|
data['iou_preds'] = ious.squeeze(1) |
|
if self.pred_iou_thresh > 0.0: |
|
keep_mask = data['iou_preds'] > self.pred_iou_thresh |
|
data.filter(keep_mask) |
|
data['stability_score'] = calculate_stability_score(data['masks'], self.mask_threshold, self.stability_score_offset) |
|
if self.stability_score_thresh > 0.0: |
|
keep_mask = data['stability_score'] >= self.stability_score_thresh |
|
data.filter(keep_mask) |
|
data['masks'] = data['masks'] > self.mask_threshold |
|
data['boxes'] = batched_mask_to_box(data['masks']) |
|
keep_mask = ~is_box_near_crop_edge(data['boxes'], crop_box, [0, 0, orig_w, orig_h]) |
|
if not torch.all(keep_mask): |
|
data.filter(keep_mask) |
|
data['masks'] = uncrop_masks(data['masks'], crop_box, orig_h, orig_w) |
|
data['rles'] = mask_to_rle_pytorch(data['masks']) |
|
del data['masks'] |
|
return data |
|
|
|
@staticmethod |
|
def postprocess_small_regions(mask_data: MaskData, min_area: int, nms_thresh: float) -> MaskData: |
|
if len(mask_data['rles']) == 0: |
|
return mask_data |
|
new_masks = [] |
|
scores = [] |
|
for rle in mask_data['rles']: |
|
mask = rle_to_mask(rle) |
|
(mask, changed) = remove_small_regions(mask, min_area, mode='holes') |
|
unchanged = not changed |
|
(mask, changed) = remove_small_regions(mask, min_area, mode='islands') |
|
unchanged = unchanged and (not changed) |
|
new_masks.append(torch.as_tensor(mask).unsqueeze(0)) |
|
scores.append(float(unchanged)) |
|
masks = torch.cat(new_masks, dim=0) |
|
boxes = batched_mask_to_box(masks) |
|
keep_by_nms = batched_nms(boxes.float(), torch.as_tensor(scores), torch.zeros_like(boxes[:, 0]), iou_threshold=nms_thresh) |
|
for i_mask in keep_by_nms: |
|
if scores[i_mask] == 0.0: |
|
mask_torch = masks[i_mask].unsqueeze(0) |
|
mask_data['rles'][i_mask] = mask_to_rle_pytorch(mask_torch)[0] |
|
mask_data['boxes'][i_mask] = boxes[i_mask] |
|
mask_data.filter(keep_by_nms) |
|
return mask_data |
|
|
|
def refine_with_m2m(self, points, point_labels, low_res_masks, points_per_batch): |
|
new_masks = [] |
|
new_iou_preds = [] |
|
for (cur_points, cur_point_labels, low_res_mask) in batch_iterator(points_per_batch, points, point_labels, low_res_masks): |
|
(best_masks, best_iou_preds, _) = self.predictor._predict(cur_points[:, None, :], cur_point_labels[:, None], mask_input=low_res_mask[:, None, :], multimask_output=False, return_logits=True) |
|
new_masks.append(best_masks) |
|
new_iou_preds.append(best_iou_preds) |
|
masks = torch.cat(new_masks, dim=0) |
|
return (masks, torch.cat(new_iou_preds, dim=0)) |
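
# Minimal usage sketch (not part of the original module; the image path is a placeholder):
#
#   import numpy as np
#   from PIL import Image
#   generator = SAM2AutomaticMaskGenerator.from_pretrained('facebook/sam2-hiera-tiny')
#   image = np.array(Image.open('example.jpg').convert('RGB'))
#   masks = generator.generate(image)
#   # each entry is a dict with 'segmentation', 'area', 'bbox', 'predicted_iou',
#   # 'point_coords', 'stability_score' and 'crop_box'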
|
|
|
# File: segment-anything-2-coreml-conversion/sam2/build_sam.py |
|
import logging |
|
import torch |
|
from hydra import compose |
|
from hydra.utils import instantiate |
|
from omegaconf import OmegaConf |
|
|
|
def build_sam2(config_file, ckpt_path=None, device='cuda', mode='eval', hydra_overrides_extra=[], apply_postprocessing=True, **kwargs): |
|
if apply_postprocessing: |
|
hydra_overrides_extra = hydra_overrides_extra.copy() |
|
hydra_overrides_extra += ['++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true', '++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05', '++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98'] |
|
cfg = compose(config_name=config_file, overrides=hydra_overrides_extra) |
|
OmegaConf.resolve(cfg) |
|
model = instantiate(cfg.model, _recursive_=True) |
|
_load_checkpoint(model, ckpt_path) |
|
model = model.to(device) |
|
if mode == 'eval': |
|
model.eval() |
|
return model |
|
|
|
def build_sam2_video_predictor(config_file, ckpt_path=None, device='cuda', mode='eval', hydra_overrides_extra=[], apply_postprocessing=True, **kwargs): |
|
hydra_overrides = ['++model._target_=sam2.sam2_video_predictor.SAM2VideoPredictor'] |
|
if apply_postprocessing: |
|
hydra_overrides_extra = hydra_overrides_extra.copy() |
|
hydra_overrides_extra += ['++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true', '++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05', '++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98', '++model.binarize_mask_from_pts_for_mem_enc=true', '++model.fill_hole_area=8'] |
|
hydra_overrides.extend(hydra_overrides_extra) |
|
cfg = compose(config_name=config_file, overrides=hydra_overrides) |
|
OmegaConf.resolve(cfg) |
|
model = instantiate(cfg.model, _recursive_=True) |
|
_load_checkpoint(model, ckpt_path) |
|
model = model.to(device) |
|
if mode == 'eval': |
|
model.eval() |
|
return model |
|
|
|
def build_sam2_hf(model_id, **kwargs): |
|
from huggingface_hub import hf_hub_download |
|
model_id_to_filenames = {'facebook/sam2-hiera-tiny': ('sam2_hiera_t.yaml', 'sam2_hiera_tiny.pt'), 'facebook/sam2-hiera-small': ('sam2_hiera_s.yaml', 'sam2_hiera_small.pt'), 'facebook/sam2-hiera-base-plus': ('sam2_hiera_b+.yaml', 'sam2_hiera_base_plus.pt'), 'facebook/sam2-hiera-large': ('sam2_hiera_l.yaml', 'sam2_hiera_large.pt')} |
|
(config_name, checkpoint_name) = model_id_to_filenames[model_id] |
|
ckpt_path = hf_hub_download(repo_id=model_id, filename=checkpoint_name) |
|
return build_sam2(config_file=config_name, ckpt_path=ckpt_path, **kwargs) |
|
|
|
def build_sam2_video_predictor_hf(model_id, **kwargs): |
|
from huggingface_hub import hf_hub_download |
|
model_id_to_filenames = {'facebook/sam2-hiera-tiny': ('sam2_hiera_t.yaml', 'sam2_hiera_tiny.pt'), 'facebook/sam2-hiera-small': ('sam2_hiera_s.yaml', 'sam2_hiera_small.pt'), 'facebook/sam2-hiera-base-plus': ('sam2_hiera_b+.yaml', 'sam2_hiera_base_plus.pt'), 'facebook/sam2-hiera-large': ('sam2_hiera_l.yaml', 'sam2_hiera_large.pt')} |
|
(config_name, checkpoint_name) = model_id_to_filenames[model_id] |
|
ckpt_path = hf_hub_download(repo_id=model_id, filename=checkpoint_name) |
|
return build_sam2_video_predictor(config_file=config_name, ckpt_path=ckpt_path, **kwargs) |
|
|
|
def _load_checkpoint(model, ckpt_path): |
|
if ckpt_path is not None: |
|
sd = torch.load(ckpt_path, map_location='cpu')['model'] |
|
(missing_keys, unexpected_keys) = model.load_state_dict(sd) |
|
if missing_keys: |
|
logging.error(missing_keys) |
|
raise RuntimeError() |
|
if unexpected_keys: |
|
logging.error(unexpected_keys) |
|
raise RuntimeError() |
|
        logging.info('Loaded checkpoint successfully')
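
# Usage sketch (illustrative, not part of the original module):
#
#   model = build_sam2_hf('facebook/sam2-hiera-small', device='cpu')
#   video_predictor = build_sam2_video_predictor_hf('facebook/sam2-hiera-small', device='cpu')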
|
|
|
# File: segment-anything-2-coreml-conversion/sam2/modeling/backbones/hieradet.py |
|
from functools import partial |
|
from typing import List, Tuple, Union |
|
import torch |
|
import torch.nn as nn |
|
import torch.nn.functional as F |
|
from sam2.modeling.backbones.utils import PatchEmbed, window_partition, window_unpartition |
|
from sam2.modeling.sam2_utils import DropPath, MLP |
|
|
|
def do_pool(x: torch.Tensor, pool: nn.Module, norm: nn.Module=None) -> torch.Tensor: |
|
if pool is None: |
|
return x |
|
x = x.permute(0, 3, 1, 2) |
|
x = pool(x) |
|
x = x.permute(0, 2, 3, 1) |
|
if norm: |
|
x = norm(x) |
|
return x |
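
# Shape note (added for clarity): do_pool expects channels-last input (B, H, W, C); with the
# default q_stride of (2, 2) the MaxPool2d halves the grid, returning (B, H // 2, W // 2, C).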
|
|
|
class MultiScaleAttention(nn.Module): |
|
|
|
def __init__(self, dim: int, dim_out: int, num_heads: int, q_pool: nn.Module=None): |
|
super().__init__() |
|
self.dim = dim |
|
self.dim_out = dim_out |
|
self.num_heads = num_heads |
|
self.q_pool = q_pool |
|
self.qkv = nn.Linear(dim, dim_out * 3) |
|
self.proj = nn.Linear(dim_out, dim_out) |
|
|
|
def forward(self, x: torch.Tensor) -> torch.Tensor: |
|
(B, H, W, _) = x.shape |
|
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1) |
|
(q, k, v) = torch.unbind(qkv, 2) |
|
if self.q_pool: |
|
q = do_pool(q.reshape(B, H, W, -1), self.q_pool) |
|
(H, W) = q.shape[1:3] |
|
q = q.reshape(B, H * W, self.num_heads, -1) |
|
x = F.scaled_dot_product_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)) |
|
x = x.transpose(1, 2) |
|
x = x.reshape(B, H, W, -1) |
|
x = self.proj(x) |
|
return x |
|
|
|
class MultiScaleBlock(nn.Module): |
|
|
|
def __init__(self, dim: int, dim_out: int, num_heads: int, mlp_ratio: float=4.0, drop_path: float=0.0, norm_layer: Union[nn.Module, str]='LayerNorm', q_stride: Tuple[int, int]=None, act_layer: nn.Module=nn.GELU, window_size: int=0): |
|
super().__init__() |
|
if isinstance(norm_layer, str): |
|
norm_layer = partial(getattr(nn, norm_layer), eps=1e-06) |
|
self.dim = dim |
|
self.dim_out = dim_out |
|
self.norm1 = norm_layer(dim) |
|
self.window_size = window_size |
|
(self.pool, self.q_stride) = (None, q_stride) |
|
if self.q_stride: |
|
self.pool = nn.MaxPool2d(kernel_size=q_stride, stride=q_stride, ceil_mode=False) |
|
self.attn = MultiScaleAttention(dim, dim_out, num_heads=num_heads, q_pool=self.pool) |
|
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() |
|
self.norm2 = norm_layer(dim_out) |
|
self.mlp = MLP(dim_out, int(dim_out * mlp_ratio), dim_out, num_layers=2, activation=act_layer) |
|
if dim != dim_out: |
|
self.proj = nn.Linear(dim, dim_out) |
|
|
|
def forward(self, x: torch.Tensor) -> torch.Tensor: |
|
shortcut = x |
|
x = self.norm1(x) |
|
if self.dim != self.dim_out: |
|
shortcut = do_pool(self.proj(x), self.pool) |
|
window_size = self.window_size |
|
if window_size > 0: |
|
(H, W) = (x.shape[1], x.shape[2]) |
|
(x, pad_hw) = window_partition(x, window_size) |
|
x = self.attn(x) |
|
if self.q_stride: |
|
window_size = self.window_size // self.q_stride[0] |
|
(H, W) = shortcut.shape[1:3] |
|
pad_h = (window_size - H % window_size) % window_size |
|
pad_w = (window_size - W % window_size) % window_size |
|
pad_hw = (H + pad_h, W + pad_w) |
|
if self.window_size > 0: |
|
x = window_unpartition(x, window_size, pad_hw, (H, W)) |
|
x = shortcut + self.drop_path(x) |
|
x = x + self.drop_path(self.mlp(self.norm2(x))) |
|
return x |
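
# Standalone sanity-check sketch (illustrative; the dimensions are arbitrary and not tied to a
# particular SAM2 configuration):
#
#   blk = MultiScaleBlock(dim=96, dim_out=192, num_heads=2, q_stride=(2, 2), window_size=8)
#   x = torch.randn(1, 64, 64, 96)   # channels-last tokens
#   y = blk(x)                       # query pooling halves the grid -> (1, 32, 32, 192)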
|
|
|
class Hiera(nn.Module): |
|
|
|
def __init__(self, embed_dim: int=96, num_heads: int=1, drop_path_rate: float=0.0, q_pool: int=3, q_stride: Tuple[int, int]=(2, 2), stages: Tuple[int, ...]=(2, 3, 16, 3), dim_mul: float=2.0, head_mul: float=2.0, window_pos_embed_bkg_spatial_size: Tuple[int, int]=(14, 14), window_spec: Tuple[int, ...]=(8, 4, 14, 7), global_att_blocks: Tuple[int, ...]=(12, 16, 20), return_interm_layers=True): |
|
super().__init__() |
|
assert len(stages) == len(window_spec) |
|
self.window_spec = window_spec |
|
depth = sum(stages) |
|
self.q_stride = q_stride |
|
self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)] |
|
assert 0 <= q_pool <= len(self.stage_ends[:-1]) |
|
self.q_pool_blocks = [x + 1 for x in self.stage_ends[:-1]][:q_pool] |
|
self.return_interm_layers = return_interm_layers |
|
self.patch_embed = PatchEmbed(embed_dim=embed_dim) |
|
self.global_att_blocks = global_att_blocks |
|
self.window_pos_embed_bkg_spatial_size = window_pos_embed_bkg_spatial_size |
|
self.pos_embed = nn.Parameter(torch.zeros(1, embed_dim, *self.window_pos_embed_bkg_spatial_size)) |
|
self.pos_embed_window = nn.Parameter(torch.zeros(1, embed_dim, self.window_spec[0], self.window_spec[0])) |
|
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] |
|
cur_stage = 1 |
|
self.blocks = nn.ModuleList() |
|
for i in range(depth): |
|
dim_out = embed_dim |
|
window_size = self.window_spec[cur_stage - 1] |
|
if self.global_att_blocks is not None: |
|
window_size = 0 if i in self.global_att_blocks else window_size |
|
if i - 1 in self.stage_ends: |
|
dim_out = int(embed_dim * dim_mul) |
|
num_heads = int(num_heads * head_mul) |
|
cur_stage += 1 |
|
block = MultiScaleBlock(dim=embed_dim, dim_out=dim_out, num_heads=num_heads, drop_path=dpr[i], q_stride=self.q_stride if i in self.q_pool_blocks else None, window_size=window_size) |
|
embed_dim = dim_out |
|
self.blocks.append(block) |
|
self.channel_list = [self.blocks[i].dim_out for i in self.stage_ends[::-1]] if return_interm_layers else [self.blocks[-1].dim_out] |
|
|
|
def _get_pos_embed(self, hw: Tuple[int, int]) -> torch.Tensor: |
|
(h, w) = hw |
|
window_embed = self.pos_embed_window |
|
pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode='bicubic') |
|
tiles = [x // y for (x, y) in zip(pos_embed.shape, window_embed.shape)] |
|
pos_embed = pos_embed + window_embed.tile(tiles) |
|
pos_embed = pos_embed.permute(0, 2, 3, 1) |
|
return pos_embed |
|
|
|
def forward(self, x: torch.Tensor) -> List[torch.Tensor]: |
|
x = self.patch_embed(x) |
|
x = x + self._get_pos_embed(x.shape[1:3]) |
|
outputs = [] |
|
for (i, blk) in enumerate(self.blocks): |
|
x = blk(x) |
|
if i == self.stage_ends[-1] or (i in self.stage_ends and self.return_interm_layers): |
|
feats = x.permute(0, 3, 1, 2) |
|
outputs.append(feats) |
|
return outputs |
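
# Note on outputs (added for clarity): with the default patch stride of 4 and three query-pooling
# stages, a 1024x1024 input yields stage-end feature maps at strides 4, 8, 16 and 32 (spatial sizes
# 256, 128, 64, 32); with return_interm_layers=True all four are returned, finest first.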
|
|
|
# File: segment-anything-2-coreml-conversion/sam2/modeling/backbones/image_encoder.py |
|
from typing import List, Optional |
|
import torch |
|
import torch.nn as nn |
|
import torch.nn.functional as F |
|
|
|
class ImageEncoder(nn.Module): |
|
|
|
def __init__(self, trunk: nn.Module, neck: nn.Module, scalp: int=0): |
|
super().__init__() |
|
self.trunk = trunk |
|
self.neck = neck |
|
self.scalp = scalp |
|
assert self.trunk.channel_list == self.neck.backbone_channel_list, f'Channel dims of trunk and neck do not match. Trunk: {self.trunk.channel_list}, neck: {self.neck.backbone_channel_list}' |
|
|
|
def forward(self, sample: torch.Tensor): |
|
(features, pos) = self.neck(self.trunk(sample)) |
|
if self.scalp > 0: |
|
(features, pos) = (features[:-self.scalp], pos[:-self.scalp]) |
|
src = features[-1] |
|
output = {'vision_features': src, 'vision_pos_enc': pos, 'backbone_fpn': features} |
|
return output |
|
|
|
class FpnNeck(nn.Module): |
|
|
|
def __init__(self, position_encoding: nn.Module, d_model: int, backbone_channel_list: List[int], kernel_size: int=1, stride: int=1, padding: int=0, fpn_interp_model: str='bilinear', fuse_type: str='sum', fpn_top_down_levels: Optional[List[int]]=None): |
|
super().__init__() |
|
self.position_encoding = position_encoding |
|
self.convs = nn.ModuleList() |
|
self.backbone_channel_list = backbone_channel_list |
|
for dim in backbone_channel_list: |
|
current = nn.Sequential() |
|
current.add_module('conv', nn.Conv2d(in_channels=dim, out_channels=d_model, kernel_size=kernel_size, stride=stride, padding=padding)) |
|
self.convs.append(current) |
|
self.fpn_interp_model = fpn_interp_model |
|
assert fuse_type in ['sum', 'avg'] |
|
self.fuse_type = fuse_type |
|
if fpn_top_down_levels is None: |
|
fpn_top_down_levels = range(len(self.convs)) |
|
self.fpn_top_down_levels = list(fpn_top_down_levels) |
|
|
|
def forward(self, xs: List[torch.Tensor]): |
|
out = [None] * len(self.convs) |
|
pos = [None] * len(self.convs) |
|
assert len(xs) == len(self.convs) |
|
prev_features = None |
|
n = len(self.convs) - 1 |
|
for i in range(n, -1, -1): |
|
x = xs[i] |
|
lateral_features = self.convs[n - i](x) |
|
if i in self.fpn_top_down_levels and prev_features is not None: |
|
top_down_features = F.interpolate(prev_features.to(dtype=torch.float32), scale_factor=2.0, mode=self.fpn_interp_model, align_corners=None if self.fpn_interp_model == 'nearest' else False, antialias=False) |
|
prev_features = lateral_features + top_down_features |
|
if self.fuse_type == 'avg': |
|
prev_features /= 2 |
|
else: |
|
prev_features = lateral_features |
|
x_out = prev_features |
|
out[i] = x_out |
|
pos[i] = self.position_encoding(x_out).to(x_out.dtype) |
|
return (out, pos) |
|
|
|
# File: segment-anything-2-coreml-conversion/sam2/modeling/backbones/utils.py |
|
"""""" |
|
from typing import Tuple |
|
import torch |
|
import torch.nn as nn |
|
import torch.nn.functional as F |
|
|
|
def window_partition(x, window_size): |
|
(B, H, W, C) = x.shape |
|
pad_h = (window_size - H % window_size) % window_size |
|
pad_w = (window_size - W % window_size) % window_size |
|
if pad_h > 0 or pad_w > 0: |
|
x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) |
|
(Hp, Wp) = (H + pad_h, W + pad_w) |
|
x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) |
|
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) |
|
return (windows, (Hp, Wp)) |
|
|
|
def window_unpartition(windows, window_size, pad_hw, hw): |
|
(Hp, Wp) = pad_hw |
|
(H, W) = hw |
|
B = windows.shape[0] // (Hp * Wp // window_size // window_size) |
|
x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1) |
|
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) |
|
if Hp > H or Wp > W: |
|
x = x[:, :H, :W, :].contiguous() |
|
return x |
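
# Round-trip sketch (illustrative): window_unpartition inverts window_partition and drops any
# padding that was added to make H and W divisible by the window size.
#
#   x = torch.randn(2, 14, 14, 32)
#   windows, pad_hw = window_partition(x, window_size=8)   # windows: (8, 8, 8, 32), pad_hw == (16, 16)
#   y = window_unpartition(windows, 8, pad_hw, (14, 14))   # (2, 14, 14, 32), equal to x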
|
|
|
class PatchEmbed(nn.Module): |
|
|
|
def __init__(self, kernel_size: Tuple[int, ...]=(7, 7), stride: Tuple[int, ...]=(4, 4), padding: Tuple[int, ...]=(3, 3), in_chans: int=3, embed_dim: int=768): |
|
super().__init__() |
|
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding) |
|
|
|
def forward(self, x: torch.Tensor) -> torch.Tensor: |
|
x = self.proj(x) |
|
x = x.permute(0, 2, 3, 1) |
|
return x |
|
|
|
# File: segment-anything-2-coreml-conversion/sam2/modeling/memory_attention.py |
|
from typing import Optional |
|
import torch |
|
from torch import nn, Tensor |
|
from sam2.modeling.sam.transformer import RoPEAttention |
|
from sam2.modeling.sam2_utils import get_activation_fn, get_clones |
|
|
|
class MemoryAttentionLayer(nn.Module): |
|
|
|
def __init__(self, activation: str, cross_attention: nn.Module, d_model: int, dim_feedforward: int, dropout: float, pos_enc_at_attn: bool, pos_enc_at_cross_attn_keys: bool, pos_enc_at_cross_attn_queries: bool, self_attention: nn.Module): |
|
super().__init__() |
|
self.d_model = d_model |
|
self.dim_feedforward = dim_feedforward |
|
self.dropout_value = dropout |
|
self.self_attn = self_attention |
|
self.cross_attn_image = cross_attention |
|
self.linear1 = nn.Linear(d_model, dim_feedforward) |
|
self.dropout = nn.Dropout(dropout) |
|
self.linear2 = nn.Linear(dim_feedforward, d_model) |
|
self.norm1 = nn.LayerNorm(d_model) |
|
self.norm2 = nn.LayerNorm(d_model) |
|
self.norm3 = nn.LayerNorm(d_model) |
|
self.dropout1 = nn.Dropout(dropout) |
|
self.dropout2 = nn.Dropout(dropout) |
|
self.dropout3 = nn.Dropout(dropout) |
|
self.activation_str = activation |
|
self.activation = get_activation_fn(activation) |
|
self.pos_enc_at_attn = pos_enc_at_attn |
|
self.pos_enc_at_cross_attn_queries = pos_enc_at_cross_attn_queries |
|
self.pos_enc_at_cross_attn_keys = pos_enc_at_cross_attn_keys |
|
|
|
def _forward_sa(self, tgt, query_pos): |
|
tgt2 = self.norm1(tgt) |
|
q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2 |
|
tgt2 = self.self_attn(q, k, v=tgt2) |
|
tgt = tgt + self.dropout1(tgt2) |
|
return tgt |
|
|
|
def _forward_ca(self, tgt, memory, query_pos, pos, num_k_exclude_rope=0): |
|
kwds = {} |
|
if num_k_exclude_rope > 0: |
|
assert isinstance(self.cross_attn_image, RoPEAttention) |
|
kwds = {'num_k_exclude_rope': num_k_exclude_rope} |
|
tgt2 = self.norm2(tgt) |
|
tgt2 = self.cross_attn_image(q=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2, k=memory + pos if self.pos_enc_at_cross_attn_keys else memory, v=memory, **kwds) |
|
tgt = tgt + self.dropout2(tgt2) |
|
return tgt |
|
|
|
def forward(self, tgt, memory, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None, num_k_exclude_rope: int=0) -> torch.Tensor: |
|
tgt = self._forward_sa(tgt, query_pos) |
|
tgt = self._forward_ca(tgt, memory, query_pos, pos, num_k_exclude_rope) |
|
tgt2 = self.norm3(tgt) |
|
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) |
|
tgt = tgt + self.dropout3(tgt2) |
|
return tgt |
|
|
|
class MemoryAttention(nn.Module): |
|
|
|
def __init__(self, d_model: int, pos_enc_at_input: bool, layer: nn.Module, num_layers: int, batch_first: bool=True): |
|
super().__init__() |
|
self.d_model = d_model |
|
self.layers = get_clones(layer, num_layers) |
|
self.num_layers = num_layers |
|
self.norm = nn.LayerNorm(d_model) |
|
self.pos_enc_at_input = pos_enc_at_input |
|
self.batch_first = batch_first |
|
|
|
def forward(self, curr: torch.Tensor, memory: torch.Tensor, curr_pos: Optional[Tensor]=None, memory_pos: Optional[Tensor]=None, num_obj_ptr_tokens: int=0): |
|
if isinstance(curr, list): |
|
assert isinstance(curr_pos, list) |
|
assert len(curr) == len(curr_pos) == 1 |
|
(curr, curr_pos) = (curr[0], curr_pos[0]) |
|
assert curr.shape[1] == memory.shape[1], 'Batch size must be the same for curr and memory' |
|
output = curr |
|
if self.pos_enc_at_input and curr_pos is not None: |
|
output = output + 0.1 * curr_pos |
|
if self.batch_first: |
|
output = output.transpose(0, 1) |
|
curr_pos = curr_pos.transpose(0, 1) |
|
memory = memory.transpose(0, 1) |
|
memory_pos = memory_pos.transpose(0, 1) |
|
for layer in self.layers: |
|
kwds = {} |
|
if isinstance(layer.cross_attn_image, RoPEAttention): |
|
kwds = {'num_k_exclude_rope': num_obj_ptr_tokens} |
|
output = layer(tgt=output, memory=memory, pos=memory_pos, query_pos=curr_pos, **kwds) |
|
normed_output = self.norm(output) |
|
if self.batch_first: |
|
normed_output = normed_output.transpose(0, 1) |
|
curr_pos = curr_pos.transpose(0, 1) |
|
return normed_output |
|
|
|
# File: segment-anything-2-coreml-conversion/sam2/modeling/memory_encoder.py |
|
import math |
|
from typing import Tuple |
|
import torch |
|
import torch.nn as nn |
|
import torch.nn.functional as F |
|
from sam2.modeling.sam2_utils import DropPath, get_clones, LayerNorm2d |
|
|
|
class MaskDownSampler(nn.Module): |
|
|
|
def __init__(self, embed_dim=256, kernel_size=4, stride=4, padding=0, total_stride=16, activation=nn.GELU): |
|
super().__init__() |
|
num_layers = int(math.log2(total_stride) // math.log2(stride)) |
|
assert stride ** num_layers == total_stride |
|
self.encoder = nn.Sequential() |
|
(mask_in_chans, mask_out_chans) = (1, 1) |
|
for _ in range(num_layers): |
|
mask_out_chans = mask_in_chans * stride ** 2 |
|
self.encoder.append(nn.Conv2d(mask_in_chans, mask_out_chans, kernel_size=kernel_size, stride=stride, padding=padding)) |
|
self.encoder.append(LayerNorm2d(mask_out_chans)) |
|
self.encoder.append(activation()) |
|
mask_in_chans = mask_out_chans |
|
self.encoder.append(nn.Conv2d(mask_out_chans, embed_dim, kernel_size=1)) |
|
|
|
def forward(self, x): |
|
return self.encoder(x) |
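
# Shape note (added for clarity): with the defaults (stride=4, total_stride=16) the encoder stacks
# two stride-4 convolutions, so a (B, 1, H, W) mask comes out as (B, embed_dim, H / 16, W / 16).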
|
|
|
class CXBlock(nn.Module): |
|
|
|
def __init__(self, dim, kernel_size=7, padding=3, drop_path=0.0, layer_scale_init_value=1e-06, use_dwconv=True): |
|
super().__init__() |
|
self.dwconv = nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=padding, groups=dim if use_dwconv else 1) |
|
self.norm = LayerNorm2d(dim, eps=1e-06) |
|
self.pwconv1 = nn.Linear(dim, 4 * dim) |
|
self.act = nn.GELU() |
|
self.pwconv2 = nn.Linear(4 * dim, dim) |
|
self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(dim), requires_grad=True) if layer_scale_init_value > 0 else None |
|
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() |
|
|
|
def forward(self, x): |
|
input = x |
|
x = self.dwconv(x) |
|
x = self.norm(x) |
|
x = x.permute(0, 2, 3, 1) |
|
x = self.pwconv1(x) |
|
x = self.act(x) |
|
x = self.pwconv2(x) |
|
if self.gamma is not None: |
|
x = self.gamma * x |
|
x = x.permute(0, 3, 1, 2) |
|
x = input + self.drop_path(x) |
|
return x |
|
|
|
class Fuser(nn.Module): |
|
|
|
def __init__(self, layer, num_layers, dim=None, input_projection=False): |
|
super().__init__() |
|
self.proj = nn.Identity() |
|
self.layers = get_clones(layer, num_layers) |
|
if input_projection: |
|
assert dim is not None |
|
self.proj = nn.Conv2d(dim, dim, kernel_size=1) |
|
|
|
def forward(self, x): |
|
x = self.proj(x) |
|
for layer in self.layers: |
|
x = layer(x) |
|
return x |
|
|
|
class MemoryEncoder(nn.Module): |
|
|
|
def __init__(self, out_dim, mask_downsampler, fuser, position_encoding, in_dim=256): |
|
super().__init__() |
|
self.mask_downsampler = mask_downsampler |
|
self.pix_feat_proj = nn.Conv2d(in_dim, in_dim, kernel_size=1) |
|
self.fuser = fuser |
|
self.position_encoding = position_encoding |
|
self.out_proj = nn.Identity() |
|
if out_dim != in_dim: |
|
self.out_proj = nn.Conv2d(in_dim, out_dim, kernel_size=1) |
|
|
|
    def forward(self, pix_feat: torch.Tensor, masks: torch.Tensor, skip_mask_sigmoid: bool=False) -> dict:
|
if not skip_mask_sigmoid: |
|
masks = F.sigmoid(masks) |
|
masks = self.mask_downsampler(masks) |
|
pix_feat = pix_feat.to(masks.device) |
|
x = self.pix_feat_proj(pix_feat) |
|
x = x + masks |
|
x = self.fuser(x) |
|
x = self.out_proj(x) |
|
pos = self.position_encoding(x).to(x.dtype) |
|
return {'vision_features': x, 'vision_pos_enc': [pos]} |
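
# Note (added for clarity): forward returns a dict with 'vision_features' (out_dim channels at the
# spatial size of pix_feat) and 'vision_pos_enc', a one-element list holding the matching positional
# encoding.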
|
|
|
# File: segment-anything-2-coreml-conversion/sam2/modeling/position_encoding.py |
|
import math |
|
from typing import Any, Optional, Tuple |
|
import numpy as np |
|
import torch |
|
from torch import nn |
|
|
|
class PositionEmbeddingSine(nn.Module): |
|
|
|
def __init__(self, num_pos_feats, temperature: int=10000, normalize: bool=True, scale: Optional[float]=None): |
|
super().__init__() |
|
assert num_pos_feats % 2 == 0, 'Expecting even model width' |
|
self.num_pos_feats = num_pos_feats // 2 |
|
self.temperature = temperature |
|
self.normalize = normalize |
|
if scale is not None and normalize is False: |
|
raise ValueError('normalize should be True if scale is passed') |
|
if scale is None: |
|
scale = 2 * math.pi |
|
self.scale = scale |
|
self.cache = {} |
|
|
|
def _encode_xy(self, x, y): |
|
assert len(x) == len(y) and x.ndim == y.ndim == 1 |
|
x_embed = x * self.scale |
|
y_embed = y * self.scale |
|
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) |
|
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) |
|
pos_x = x_embed[:, None] / dim_t |
|
pos_y = y_embed[:, None] / dim_t |
|
pos_x = torch.stack((pos_x[:, 0::2].sin(), pos_x[:, 1::2].cos()), dim=2).flatten(1) |
|
pos_y = torch.stack((pos_y[:, 0::2].sin(), pos_y[:, 1::2].cos()), dim=2).flatten(1) |
|
return (pos_x, pos_y) |
|
|
|
@torch.no_grad() |
|
def encode_boxes(self, x, y, w, h): |
|
(pos_x, pos_y) = self._encode_xy(x, y) |
|
pos = torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1) |
|
return pos |
|
encode = encode_boxes |
|
|
|
@torch.no_grad() |
|
def encode_points(self, x, y, labels): |
|
((bx, nx), (by, ny), (bl, nl)) = (x.shape, y.shape, labels.shape) |
|
assert bx == by and nx == ny and (bx == bl) and (nx == nl) |
|
(pos_x, pos_y) = self._encode_xy(x.flatten(), y.flatten()) |
|
(pos_x, pos_y) = (pos_x.reshape(bx, nx, -1), pos_y.reshape(by, ny, -1)) |
|
pos = torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2) |
|
return pos |
|
|
|
@torch.no_grad() |
|
def forward(self, x: torch.Tensor): |
|
cache_key = (x.shape[-2], x.shape[-1]) |
|
if cache_key in self.cache: |
|
return self.cache[cache_key][None].repeat(x.shape[0], 1, 1, 1) |
|
y_embed = torch.arange(1, x.shape[-2] + 1, dtype=torch.float32, device=x.device).view(1, -1, 1).repeat(x.shape[0], 1, x.shape[-1]) |
|
x_embed = torch.arange(1, x.shape[-1] + 1, dtype=torch.float32, device=x.device).view(1, 1, -1).repeat(x.shape[0], x.shape[-2], 1) |
|
if self.normalize: |
|
eps = 1e-06 |
|
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale |
|
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale |
|
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) |
|
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) |
|
pos_x = x_embed[:, :, :, None] / dim_t |
|
pos_y = y_embed[:, :, :, None] / dim_t |
|
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) |
|
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) |
|
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) |
|
self.cache[cache_key] = pos[0] |
|
return pos |
|
|
|
class PositionEmbeddingRandom(nn.Module): |
|
|
|
def __init__(self, num_pos_feats: int=64, scale: Optional[float]=None) -> None: |
|
super().__init__() |
|
if scale is None or scale <= 0.0: |
|
scale = 1.0 |
|
self.register_buffer('positional_encoding_gaussian_matrix', scale * torch.randn((2, num_pos_feats))) |
|
|
|
def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor: |
|
coords = 2 * coords - 1 |
|
coords = coords @ self.positional_encoding_gaussian_matrix |
|
coords = 2 * np.pi * coords |
|
return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1) |
|
|
|
def forward(self, size: Tuple[int, int]) -> torch.Tensor: |
|
(h, w) = size |
|
device: Any = self.positional_encoding_gaussian_matrix.device |
|
grid = torch.ones((h, w), device=device, dtype=torch.float32) |
|
y_embed = grid.cumsum(dim=0) - 0.5 |
|
x_embed = grid.cumsum(dim=1) - 0.5 |
|
y_embed = y_embed / h |
|
x_embed = x_embed / w |
|
pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1)) |
|
return pe.permute(2, 0, 1) |
|
|
|
def forward_with_coords(self, coords_input: torch.Tensor, image_size: Tuple[int, int]) -> torch.Tensor: |
|
coords = coords_input.clone() |
|
coords[:, :, 0] = coords[:, :, 0] / image_size[1] |
|
coords[:, :, 1] = coords[:, :, 1] / image_size[0] |
|
return self._pe_encoding(coords.to(torch.float)) |
|
|
|
def init_t_xy(end_x: int, end_y: int): |
|
t = torch.arange(end_x * end_y, dtype=torch.float32) |
|
t_x = (t % end_x).float() |
|
t_y = torch.div(t, end_x, rounding_mode='floor').float() |
|
return (t_x, t_y) |
|
|
|
def compute_axial_cis(dim: int, end_x: int, end_y: int, theta: float=10000.0): |
|
freqs_x = 1.0 / theta ** (torch.arange(0, dim, 4)[:dim // 4].float() / dim) |
|
freqs_y = 1.0 / theta ** (torch.arange(0, dim, 4)[:dim // 4].float() / dim) |
|
(t_x, t_y) = init_t_xy(end_x, end_y) |
|
freqs_x = torch.outer(t_x, freqs_x) |
|
freqs_y = torch.outer(t_y, freqs_y) |
|
freqs_cis_x = torch.polar(torch.ones_like(freqs_x), freqs_x) |
|
freqs_cis_y = torch.polar(torch.ones_like(freqs_y), freqs_y) |
|
return torch.cat([freqs_cis_x, freqs_cis_y], dim=-1) |
|
|
|
def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor): |
|
ndim = x.ndim |
|
assert 0 <= 1 < ndim |
|
assert freqs_cis.shape == (x.shape[-2], x.shape[-1]) |
|
shape = [d if i >= ndim - 2 else 1 for (i, d) in enumerate(x.shape)] |
|
return freqs_cis.view(*shape) |
|
|
|
def apply_rotary_enc(xq: torch.Tensor, xk: torch.Tensor, freqs_cis: torch.Tensor, repeat_freqs_k: bool=False): |
|
xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2)) |
|
xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) if xk.shape[-2] != 0 else None |
|
freqs_cis = reshape_for_broadcast(freqs_cis, xq_) |
|
xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3) |
|
if xk_ is None: |
|
return (xq_out.type_as(xq).to(xq.device), xk) |
|
if repeat_freqs_k: |
|
r = xk_.shape[-2] // xq_.shape[-2] |
|
if freqs_cis.is_cuda: |
|
freqs_cis = freqs_cis.repeat(*[1] * (freqs_cis.ndim - 2), r, 1) |
|
else: |
|
freqs_cis = freqs_cis.unsqueeze(2).expand(-1, -1, r, -1, -1).flatten(2, 3) |
|
xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3) |
|
return (xq_out.type_as(xq).to(xq.device), xk_out.type_as(xk).to(xk.device)) |
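
# Usage sketch (illustrative; head_dim and the 8x8 token grid are arbitrary choices):
#
#   freqs = compute_axial_cis(dim=64, end_x=8, end_y=8)        # (64, 32) complex frequencies
#   q = torch.randn(1, 2, 64, 64)                              # (batch, heads, tokens, head_dim)
#   k = torch.randn(1, 2, 64, 64)
#   q_rot, k_rot = apply_rotary_enc(q, k, freqs_cis=freqs)     # same shapes, rotary-encoded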
|
|
|
# File: segment-anything-2-coreml-conversion/sam2/modeling/sam/mask_decoder.py |
|
from typing import List, Optional, Tuple, Type |
|
import torch |
|
from torch import nn |
|
from sam2.modeling.sam2_utils import LayerNorm2d, MLP |
|
|
|
class MaskDecoder(nn.Module): |
|
|
|
def __init__(self, *, transformer_dim: int, transformer: nn.Module, num_multimask_outputs: int=3, activation: Type[nn.Module]=nn.GELU, iou_head_depth: int=3, iou_head_hidden_dim: int=256, use_high_res_features: bool=False, iou_prediction_use_sigmoid=False, dynamic_multimask_via_stability=False, dynamic_multimask_stability_delta=0.05, dynamic_multimask_stability_thresh=0.98, pred_obj_scores: bool=False, pred_obj_scores_mlp: bool=False, use_multimask_token_for_obj_ptr: bool=False) -> None: |
|
super().__init__() |
|
self.transformer_dim = transformer_dim |
|
self.transformer = transformer |
|
self.num_multimask_outputs = num_multimask_outputs |
|
self.iou_token = nn.Embedding(1, transformer_dim) |
|
self.num_mask_tokens = num_multimask_outputs + 1 |
|
self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim) |
|
self.pred_obj_scores = pred_obj_scores |
|
if self.pred_obj_scores: |
|
self.obj_score_token = nn.Embedding(1, transformer_dim) |
|
self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr |
|
self.output_upscaling = nn.Sequential(nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2), LayerNorm2d(transformer_dim // 4), activation(), nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2), activation()) |
|
self.use_high_res_features = use_high_res_features |
|
if use_high_res_features: |
|
self.conv_s0 = nn.Conv2d(transformer_dim, transformer_dim // 8, kernel_size=1, stride=1) |
|
self.conv_s1 = nn.Conv2d(transformer_dim, transformer_dim // 4, kernel_size=1, stride=1) |
|
self.output_hypernetworks_mlps = nn.ModuleList([MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) for i in range(self.num_mask_tokens)]) |
|
self.iou_prediction_head = MLP(transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth, sigmoid_output=iou_prediction_use_sigmoid) |
|
if self.pred_obj_scores: |
|
self.pred_obj_score_head = nn.Linear(transformer_dim, 1) |
|
if pred_obj_scores_mlp: |
|
self.pred_obj_score_head = MLP(transformer_dim, transformer_dim, 1, 3) |
|
self.dynamic_multimask_via_stability = dynamic_multimask_via_stability |
|
self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta |
|
self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh |
|
|
|
def forward(self, image_embeddings: torch.Tensor, image_pe: torch.Tensor, sparse_prompt_embeddings: torch.Tensor, dense_prompt_embeddings: torch.Tensor, multimask_output: bool, repeat_image: bool, high_res_features: Optional[List[torch.Tensor]]=None) -> Tuple[torch.Tensor, torch.Tensor]: |
|
(masks, iou_pred, mask_tokens_out, object_score_logits) = self.predict_masks(image_embeddings=image_embeddings, image_pe=image_pe, sparse_prompt_embeddings=sparse_prompt_embeddings, dense_prompt_embeddings=dense_prompt_embeddings, repeat_image=repeat_image, high_res_features=high_res_features) |
|
if multimask_output: |
|
masks = masks[:, 1:, :, :] |
|
iou_pred = iou_pred[:, 1:] |
|
elif self.dynamic_multimask_via_stability and (not self.training): |
|
(masks, iou_pred) = self._dynamic_multimask_via_stability(masks, iou_pred) |
|
else: |
|
masks = masks[:, 0:1, :, :] |
|
iou_pred = iou_pred[:, 0:1] |
|
if multimask_output and self.use_multimask_token_for_obj_ptr: |
|
sam_tokens_out = mask_tokens_out[:, 1:] |
|
else: |
|
sam_tokens_out = mask_tokens_out[:, 0:1] |
|
return (masks, iou_pred, sam_tokens_out, object_score_logits) |
|
|
|
def predict_masks(self, image_embeddings: torch.Tensor, image_pe: torch.Tensor, sparse_prompt_embeddings: torch.Tensor, dense_prompt_embeddings: torch.Tensor, repeat_image: bool, high_res_features: Optional[List[torch.Tensor]]=None) -> Tuple[torch.Tensor, torch.Tensor]: |
|
s = 0 |
|
if self.pred_obj_scores: |
|
output_tokens = torch.cat([self.obj_score_token.weight, self.iou_token.weight, self.mask_tokens.weight], dim=0) |
|
s = 1 |
|
else: |
|
output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0) |
|
output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1) |
|
tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1) |
|
if repeat_image: |
|
src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0) |
|
else: |
|
assert image_embeddings.shape[0] == tokens.shape[0] |
|
src = image_embeddings |
|
src = src + dense_prompt_embeddings |
|
assert image_pe.size(0) == 1, 'image_pe should have size 1 in batch dim (from `get_dense_pe()`)' |
|
pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0) |
|
(b, c, h, w) = src.shape |
|
(hs, src) = self.transformer(src, pos_src, tokens) |
|
iou_token_out = hs[:, s, :] |
|
mask_tokens_out = hs[:, s + 1:s + 1 + self.num_mask_tokens, :] |
|
src = src.transpose(1, 2).view(b, c, h, w) |
|
if not self.use_high_res_features: |
|
upscaled_embedding = self.output_upscaling(src) |
|
else: |
|
(dc1, ln1, act1, dc2, act2) = self.output_upscaling |
|
(feat_s0, feat_s1) = high_res_features |
|
upscaled_embedding = act1(ln1(dc1(src) + feat_s1)) |
|
upscaled_embedding = act2(dc2(upscaled_embedding) + feat_s0) |
|
hyper_in_list: List[torch.Tensor] = [] |
|
for i in range(self.num_mask_tokens): |
|
hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :])) |
|
hyper_in = torch.stack(hyper_in_list, dim=1) |
|
(b, c, h, w) = upscaled_embedding.shape |
|
masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w) |
|
iou_pred = self.iou_prediction_head(iou_token_out) |
|
if self.pred_obj_scores: |
|
assert s == 1 |
|
object_score_logits = self.pred_obj_score_head(hs[:, 0, :]) |
|
else: |
|
object_score_logits = 10.0 * iou_pred.new_ones(iou_pred.shape[0], 1) |
|
return (masks, iou_pred, mask_tokens_out, object_score_logits) |
|
|
|
def _get_stability_scores(self, mask_logits): |
|
mask_logits = mask_logits.flatten(-2) |
|
stability_delta = self.dynamic_multimask_stability_delta |
|
area_i = torch.sum(mask_logits > stability_delta, dim=-1).float() |
|
area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float() |
|
stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0) |
|
return stability_scores |
|
|
|
def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores): |
|
multimask_logits = all_mask_logits[:, 1:, :, :] |
|
multimask_iou_scores = all_iou_scores[:, 1:] |
|
best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1) |
|
batch_inds = torch.arange(multimask_iou_scores.size(0), device=all_iou_scores.device) |
|
best_multimask_logits = multimask_logits[batch_inds, best_scores_inds] |
|
best_multimask_logits = best_multimask_logits.unsqueeze(1) |
|
best_multimask_iou_scores = multimask_iou_scores[batch_inds, best_scores_inds] |
|
best_multimask_iou_scores = best_multimask_iou_scores.unsqueeze(1) |
|
singlemask_logits = all_mask_logits[:, 0:1, :, :] |
|
singlemask_iou_scores = all_iou_scores[:, 0:1] |
|
stability_scores = self._get_stability_scores(singlemask_logits) |
|
is_stable = stability_scores >= self.dynamic_multimask_stability_thresh |
|
mask_logits_out = torch.where(is_stable[..., None, None].expand_as(singlemask_logits), singlemask_logits, best_multimask_logits) |
|
iou_scores_out = torch.where(is_stable.expand_as(singlemask_iou_scores), singlemask_iou_scores, best_multimask_iou_scores) |
|
return (mask_logits_out, iou_scores_out) |
|
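# Illustrative sketch (not part of the upstream SAM2 source): the stability score used by
# _dynamic_multimask_via_stability above is the IoU between the single-mask logits
# thresholded at +delta and at -delta. Masks whose logits hover around zero score low and
# get swapped for the best multimask output. The toy tensor below is hypothetical.
def _stability_score_sketch(delta: float = 0.05):
    import torch
    mask_logits = torch.tensor([[[[3.0, 2.0], [-4.0, 0.01]]]])   # (B=1, 1, 2, 2) logits
    flat = mask_logits.flatten(-2)                               # (1, 1, 4)
    area_i = torch.sum(flat > delta, dim=-1).float()             # confidently-inside pixels -> 2
    area_u = torch.sum(flat > -delta, dim=-1).float()            # not-confidently-outside pixels -> 3
    stability = torch.where(area_u > 0, area_i / area_u, torch.ones_like(area_u))
    return stability                                             # tensor([[0.6667]])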
|
|
# File: segment-anything-2-coreml-conversion/sam2/modeling/sam/prompt_encoder.py |
|
from typing import Optional, Tuple, Type |
|
import torch |
|
from torch import nn |
|
from sam2.modeling.position_encoding import PositionEmbeddingRandom |
|
from sam2.modeling.sam2_utils import LayerNorm2d |
|
|
|
class PromptEncoder(nn.Module): |
|
|
|
def __init__(self, embed_dim: int, image_embedding_size: Tuple[int, int], input_image_size: Tuple[int, int], mask_in_chans: int, activation: Type[nn.Module]=nn.GELU) -> None: |
|
super().__init__() |
|
self.embed_dim = embed_dim |
|
self.input_image_size = input_image_size |
|
self.image_embedding_size = image_embedding_size |
|
self.pe_layer = PositionEmbeddingRandom(embed_dim // 2) |
|
self.num_point_embeddings: int = 4 |
|
point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)] |
|
self.point_embeddings = nn.ModuleList(point_embeddings) |
|
self.not_a_point_embed = nn.Embedding(1, embed_dim) |
|
self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1]) |
|
        self.mask_downscaling = nn.Sequential(
            nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
            LayerNorm2d(mask_in_chans // 4),
            activation(),
            nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
            LayerNorm2d(mask_in_chans),
            activation(),
            nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
        )
|
self.no_mask_embed = nn.Embedding(1, embed_dim) |
|
|
|
def get_dense_pe(self) -> torch.Tensor: |
|
return self.pe_layer(self.image_embedding_size).unsqueeze(0) |
|
|
|
def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor: |
|
points = points + 0.5 |
|
if pad: |
|
padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device) |
|
padding_label = -torch.ones((labels.shape[0], 1), device=labels.device) |
|
points = torch.cat([points, padding_point], dim=1) |
|
labels = torch.cat([labels, padding_label], dim=1) |
|
point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size) |
|
mask_not_a_point = (labels == -1).float().unsqueeze(-1) |
|
mask_label_0 = (labels == 0).float().unsqueeze(-1) |
|
mask_label_1 = (labels == 1).float().unsqueeze(-1) |
|
mask_label_2 = (labels == 2).float().unsqueeze(-1) |
|
mask_label_3 = (labels == 3).float().unsqueeze(-1) |
|
        point_embedding = (
            point_embedding * (1 - mask_not_a_point)
            + self.not_a_point_embed.weight * mask_not_a_point
            + self.point_embeddings[0].weight * mask_label_0
            + self.point_embeddings[1].weight * mask_label_1
            + self.point_embeddings[2].weight * mask_label_2
            + self.point_embeddings[3].weight * mask_label_3
        )
|
return point_embedding |
|
|
|
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor: |
|
boxes = boxes + 0.5 |
|
coords = boxes.reshape(-1, 2, 2) |
|
corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size) |
|
corner_embedding[:, 0, :] += self.point_embeddings[2].weight |
|
corner_embedding[:, 1, :] += self.point_embeddings[3].weight |
|
return corner_embedding |
|
|
|
def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor: |
|
mask_embedding = self.mask_downscaling(masks) |
|
return mask_embedding |
|
|
|
def _get_batch_size(self, points: Optional[Tuple[torch.Tensor, torch.Tensor]], boxes: Optional[torch.Tensor], masks: Optional[torch.Tensor]) -> int: |
|
if points is not None: |
|
return points[0].shape[0] |
|
elif boxes is not None: |
|
return boxes.shape[0] |
|
elif masks is not None: |
|
return masks.shape[0] |
|
else: |
|
return 1 |
|
|
|
def _get_device(self) -> torch.device: |
|
return self.point_embeddings[0].weight.device |
|
|
|
def forward(self, points: Optional[Tuple[torch.Tensor, torch.Tensor]], boxes: Optional[torch.Tensor], masks: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: |
|
bs = self._get_batch_size(points, boxes, masks) |
|
sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device()) |
|
if points is not None: |
|
(coords, labels) = points |
|
point_embeddings = self._embed_points(coords, labels, pad=boxes is None) |
|
sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1) |
|
if boxes is not None: |
|
box_embeddings = self._embed_boxes(boxes) |
|
sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1) |
|
if masks is not None: |
|
dense_embeddings = self._embed_masks(masks) |
|
else: |
|
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]) |
|
return (sparse_embeddings, dense_embeddings) |
|
|
|
def points_only(self, points: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: |
|
(coords, labels) = points |
|
sparse_embeddings = self._embed_points(coords, labels, pad=True) |
|
bs = points[0].shape[0] |
|
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]) |
|
return (sparse_embeddings, dense_embeddings) |
|
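# Usage sketch (illustrative, not part of the upstream SAM2 source): embedding a single
# foreground point with points_only. Dimensions assume the configuration built in
# SAM2Base._build_sam_heads (embed_dim=256, a 64x64 embedding grid, 1024x1024 input).
def _points_only_sketch():
    import torch
    enc = PromptEncoder(embed_dim=256, image_embedding_size=(64, 64),
                        input_image_size=(1024, 1024), mask_in_chans=16)
    coords = torch.tensor([[[512.0, 512.0]]])          # (B=1, N=1, 2) in input-image pixels
    labels = torch.tensor([[1]], dtype=torch.int32)    # 1 = foreground, 0 = background
    sparse, dense = enc.points_only((coords, labels))
    # sparse: (1, 2, 256)      -> the point embedding plus one padding ("not a point") token
    # dense:  (1, 256, 64, 64) -> the broadcast no-mask embedding
    return sparse, dense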
|
|
# File: segment-anything-2-coreml-conversion/sam2/modeling/sam/transformer.py |
|
import contextlib |
|
import math |
|
import warnings |
|
from functools import partial |
|
from typing import Tuple, Type |
|
import torch |
|
import torch.nn.functional as F |
|
from torch import nn, Tensor |
|
from sam2.modeling.position_encoding import apply_rotary_enc, compute_axial_cis |
|
from sam2.modeling.sam2_utils import MLP |
|
from sam2.utils.misc import get_sdpa_settings |
|
warnings.simplefilter(action='ignore', category=FutureWarning) |
|
(OLD_GPU, USE_FLASH_ATTN, MATH_KERNEL_ON) = get_sdpa_settings() |
|
ALLOW_ALL_KERNELS = False |
|
|
|
def sdp_kernel_context(dropout_p): |
|
if ALLOW_ALL_KERNELS: |
|
return contextlib.nullcontext() |
|
    return torch.backends.cuda.sdp_kernel(
        enable_flash=USE_FLASH_ATTN,
        enable_math=(OLD_GPU and dropout_p > 0.0) or MATH_KERNEL_ON,
        enable_mem_efficient=OLD_GPU,
    )
|
|
|
class TwoWayTransformer(nn.Module): |
|
|
|
def __init__(self, depth: int, embedding_dim: int, num_heads: int, mlp_dim: int, activation: Type[nn.Module]=nn.ReLU, attention_downsample_rate: int=2) -> None: |
|
super().__init__() |
|
self.depth = depth |
|
self.embedding_dim = embedding_dim |
|
self.num_heads = num_heads |
|
self.mlp_dim = mlp_dim |
|
self.layers = nn.ModuleList() |
|
for i in range(depth): |
|
self.layers.append(TwoWayAttentionBlock(embedding_dim=embedding_dim, num_heads=num_heads, mlp_dim=mlp_dim, activation=activation, attention_downsample_rate=attention_downsample_rate, skip_first_layer_pe=i == 0)) |
|
self.final_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate) |
|
self.norm_final_attn = nn.LayerNorm(embedding_dim) |
|
|
|
def forward(self, image_embedding: Tensor, image_pe: Tensor, point_embedding: Tensor) -> Tuple[Tensor, Tensor]: |
|
(bs, c, h, w) = image_embedding.shape |
|
image_embedding = image_embedding.flatten(2).permute(0, 2, 1) |
|
image_pe = image_pe.flatten(2).permute(0, 2, 1) |
|
queries = point_embedding |
|
keys = image_embedding |
|
for layer in self.layers: |
|
(queries, keys) = layer(queries=queries, keys=keys, query_pe=point_embedding, key_pe=image_pe) |
|
q = queries + point_embedding |
|
k = keys + image_pe |
|
attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys) |
|
queries = queries + attn_out |
|
queries = self.norm_final_attn(queries) |
|
return (queries, keys) |
|
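# Shape sketch (illustrative, not part of the upstream SAM2 source): the two-way
# transformer as configured in SAM2Base._build_sam_heads. The random tensors stand in for
# the backbone embedding, its dense positional encoding, and the prompt/output tokens.
def _two_way_transformer_shape_sketch():
    import torch
    transformer = TwoWayTransformer(depth=2, embedding_dim=256, num_heads=8, mlp_dim=2048)
    image_embedding = torch.randn(1, 256, 64, 64)   # (B, C, H, W) image features
    image_pe = torch.randn(1, 256, 64, 64)          # dense positional encoding
    tokens = torch.randn(1, 8, 256)                 # (B, N_tokens, C) output + prompt tokens
    queries, keys = transformer(image_embedding, image_pe, tokens)
    # queries: (1, 8, 256)    -> refined token embeddings
    # keys:    (1, 4096, 256) -> refined, flattened image embeddings
    return queries, keys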
|
|
class TwoWayAttentionBlock(nn.Module): |
|
|
|
def __init__(self, embedding_dim: int, num_heads: int, mlp_dim: int=2048, activation: Type[nn.Module]=nn.ReLU, attention_downsample_rate: int=2, skip_first_layer_pe: bool=False) -> None: |
|
super().__init__() |
|
self.self_attn = Attention(embedding_dim, num_heads) |
|
self.norm1 = nn.LayerNorm(embedding_dim) |
|
self.cross_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate) |
|
self.norm2 = nn.LayerNorm(embedding_dim) |
|
self.mlp = MLP(embedding_dim, mlp_dim, embedding_dim, num_layers=2, activation=activation) |
|
self.norm3 = nn.LayerNorm(embedding_dim) |
|
self.norm4 = nn.LayerNorm(embedding_dim) |
|
self.cross_attn_image_to_token = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate) |
|
self.skip_first_layer_pe = skip_first_layer_pe |
|
|
|
def forward(self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor) -> Tuple[Tensor, Tensor]: |
|
if self.skip_first_layer_pe: |
|
queries = self.self_attn(q=queries, k=queries, v=queries) |
|
else: |
|
q = queries + query_pe |
|
attn_out = self.self_attn(q=q, k=q, v=queries) |
|
queries = queries + attn_out |
|
queries = self.norm1(queries) |
|
q = queries + query_pe |
|
k = keys + key_pe |
|
attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys) |
|
queries = queries + attn_out |
|
queries = self.norm2(queries) |
|
mlp_out = self.mlp(queries) |
|
queries = queries + mlp_out |
|
queries = self.norm3(queries) |
|
q = queries + query_pe |
|
k = keys + key_pe |
|
attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries) |
|
keys = keys + attn_out |
|
keys = self.norm4(keys) |
|
return (queries, keys) |
|
|
|
class Attention(nn.Module): |
|
|
|
def __init__(self, embedding_dim: int, num_heads: int, downsample_rate: int=1, dropout: float=0.0, kv_in_dim: int=None) -> None: |
|
super().__init__() |
|
self.embedding_dim = embedding_dim |
|
self.kv_in_dim = kv_in_dim if kv_in_dim is not None else embedding_dim |
|
self.internal_dim = embedding_dim // downsample_rate |
|
self.num_heads = num_heads |
|
        assert self.internal_dim % num_heads == 0, 'num_heads must divide internal_dim (embedding_dim // downsample_rate).'
|
self.q_proj = nn.Linear(embedding_dim, self.internal_dim) |
|
self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim) |
|
self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim) |
|
self.out_proj = nn.Linear(self.internal_dim, embedding_dim) |
|
self.dropout_p = dropout |
|
|
|
def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor: |
|
(b, n, c) = x.shape |
|
x = x.reshape(b, n, num_heads, c // num_heads) |
|
return x.transpose(1, 2) |
|
|
|
def _recombine_heads(self, x: Tensor) -> Tensor: |
|
(b, n_heads, n_tokens, c_per_head) = x.shape |
|
x = x.transpose(1, 2) |
|
return x.reshape(b, n_tokens, n_heads * c_per_head) |
|
|
|
def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: |
|
q = self.q_proj(q) |
|
k = self.k_proj(k) |
|
v = self.v_proj(v) |
|
q = self._separate_heads(q, self.num_heads) |
|
k = self._separate_heads(k, self.num_heads) |
|
v = self._separate_heads(v, self.num_heads) |
|
dropout_p = self.dropout_p if self.training else 0.0 |
|
try: |
|
with sdp_kernel_context(dropout_p): |
|
out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) |
|
except Exception as e: |
|
            warnings.warn(f'Flash Attention kernel failed due to: {e}\nFalling back to all available kernels for scaled_dot_product_attention (which may be slower).', category=UserWarning, stacklevel=2)
|
global ALLOW_ALL_KERNELS |
|
ALLOW_ALL_KERNELS = True |
|
out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) |
|
out = self._recombine_heads(out) |
|
out = self.out_proj(out) |
|
return out |
|
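# Illustrative sketch (not part of the upstream SAM2 source): with downsample_rate=2 the
# attention above projects 256-dim tokens into a 128-dim internal space (16 dims per head
# with 8 heads) before the output projection maps the result back to 256 dims.
def _attention_downsample_sketch():
    import torch
    attn = Attention(embedding_dim=256, num_heads=8, downsample_rate=2)
    q = torch.randn(1, 8, 256)          # e.g. prompt/output tokens
    kv = torch.randn(1, 4096, 256)      # e.g. a flattened 64x64 image embedding
    out = attn(q=q, k=kv, v=kv)
    # out: (1, 8, 256) -> one attended vector per query token
    return out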
|
|
class RoPEAttention(Attention): |
|
|
|
def __init__(self, *args, rope_theta=10000.0, rope_k_repeat=False, feat_sizes=(32, 32), **kwargs): |
|
super().__init__(*args, **kwargs) |
|
self.compute_cis = partial(compute_axial_cis, dim=self.internal_dim // self.num_heads, theta=rope_theta) |
|
freqs_cis = self.compute_cis(end_x=feat_sizes[0], end_y=feat_sizes[1]) |
|
self.freqs_cis = freqs_cis |
|
self.rope_k_repeat = rope_k_repeat |
|
|
|
def forward(self, q: Tensor, k: Tensor, v: Tensor, num_k_exclude_rope: int=0) -> Tensor: |
|
q = self.q_proj(q) |
|
k = self.k_proj(k) |
|
v = self.v_proj(v) |
|
q = self._separate_heads(q, self.num_heads) |
|
k = self._separate_heads(k, self.num_heads) |
|
v = self._separate_heads(v, self.num_heads) |
|
w = h = math.sqrt(q.shape[-2]) |
|
self.freqs_cis = self.freqs_cis.to(q.device) |
|
if self.freqs_cis.shape[0] != q.shape[-2]: |
|
self.freqs_cis = self.compute_cis(end_x=w, end_y=h).to(q.device) |
|
if q.shape[-2] != k.shape[-2]: |
|
assert self.rope_k_repeat |
|
num_k_rope = k.size(-2) - num_k_exclude_rope |
|
(q, k[:, :, :num_k_rope]) = apply_rotary_enc(q, k[:, :, :num_k_rope], freqs_cis=self.freqs_cis, repeat_freqs_k=self.rope_k_repeat) |
|
dropout_p = self.dropout_p if self.training else 0.0 |
|
try: |
|
with sdp_kernel_context(dropout_p): |
|
out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) |
|
except Exception as e: |
|
            warnings.warn(f'Flash Attention kernel failed due to: {e}\nFalling back to all available kernels for scaled_dot_product_attention (which may be slower).', category=UserWarning, stacklevel=2)
|
global ALLOW_ALL_KERNELS |
|
ALLOW_ALL_KERNELS = True |
|
out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) |
|
out = self._recombine_heads(out) |
|
out = self.out_proj(out) |
|
return out |
|
|
|
# File: segment-anything-2-coreml-conversion/sam2/modeling/sam2_base.py |
|
import torch |
|
import torch.distributed |
|
import torch.nn.functional as F |
|
from torch.nn.init import trunc_normal_ |
|
from sam2.modeling.sam.mask_decoder import MaskDecoder |
|
from sam2.modeling.sam.prompt_encoder import PromptEncoder |
|
from sam2.modeling.sam.transformer import TwoWayTransformer |
|
from sam2.modeling.sam2_utils import get_1d_sine_pe, MLP, select_closest_cond_frames |
|
NO_OBJ_SCORE = -1024.0 |
|
|
|
class SAM2Base(torch.nn.Module): |
|
|
|
    def __init__(
        self,
        image_encoder,
        memory_attention,
        memory_encoder,
        num_maskmem=7,
        image_size=512,
        backbone_stride=16,
        sigmoid_scale_for_mem_enc=1.0,
        sigmoid_bias_for_mem_enc=0.0,
        binarize_mask_from_pts_for_mem_enc=False,
        use_mask_input_as_output_without_sam=False,
        max_cond_frames_in_attn=-1,
        directly_add_no_mem_embed=False,
        use_high_res_features_in_sam=False,
        multimask_output_in_sam=False,
        multimask_min_pt_num=1,
        multimask_max_pt_num=1,
        multimask_output_for_tracking=False,
        use_multimask_token_for_obj_ptr: bool=False,
        iou_prediction_use_sigmoid=False,
        memory_temporal_stride_for_eval=1,
        add_all_frames_to_correct_as_cond=False,
        non_overlap_masks_for_mem_enc=False,
        use_obj_ptrs_in_encoder=False,
        max_obj_ptrs_in_encoder=16,
        add_tpos_enc_to_obj_ptrs=True,
        proj_tpos_enc_in_obj_ptrs=False,
        only_obj_ptrs_in_the_past_for_eval=False,
        pred_obj_scores: bool=False,
        pred_obj_scores_mlp: bool=False,
        fixed_no_obj_ptr: bool=False,
        soft_no_obj_ptr: bool=False,
        use_mlp_for_obj_ptr_proj: bool=False,
        sam_mask_decoder_extra_args=None,
        compile_image_encoder: bool=False,
    ):
|
super().__init__() |
|
self.image_encoder = image_encoder |
|
self.use_high_res_features_in_sam = use_high_res_features_in_sam |
|
self.num_feature_levels = 3 if use_high_res_features_in_sam else 1 |
|
self.use_obj_ptrs_in_encoder = use_obj_ptrs_in_encoder |
|
self.max_obj_ptrs_in_encoder = max_obj_ptrs_in_encoder |
|
if use_obj_ptrs_in_encoder: |
|
self.mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4) |
|
self.add_tpos_enc_to_obj_ptrs = add_tpos_enc_to_obj_ptrs |
|
if proj_tpos_enc_in_obj_ptrs: |
|
assert add_tpos_enc_to_obj_ptrs |
|
self.proj_tpos_enc_in_obj_ptrs = proj_tpos_enc_in_obj_ptrs |
|
self.only_obj_ptrs_in_the_past_for_eval = only_obj_ptrs_in_the_past_for_eval |
|
self.memory_attention = memory_attention |
|
self.hidden_dim = memory_attention.d_model |
|
self.memory_encoder = memory_encoder |
|
self.mem_dim = self.hidden_dim |
|
if hasattr(self.memory_encoder, 'out_proj') and hasattr(self.memory_encoder.out_proj, 'weight'): |
|
self.mem_dim = self.memory_encoder.out_proj.weight.shape[0] |
|
self.num_maskmem = num_maskmem |
|
self.maskmem_tpos_enc = torch.nn.Parameter(torch.zeros(num_maskmem, 1, 1, self.mem_dim)) |
|
trunc_normal_(self.maskmem_tpos_enc, std=0.02) |
|
self.no_mem_embed = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim)) |
|
self.no_mem_pos_enc = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim)) |
|
trunc_normal_(self.no_mem_embed, std=0.02) |
|
trunc_normal_(self.no_mem_pos_enc, std=0.02) |
|
self.directly_add_no_mem_embed = directly_add_no_mem_embed |
|
self.sigmoid_scale_for_mem_enc = sigmoid_scale_for_mem_enc |
|
self.sigmoid_bias_for_mem_enc = sigmoid_bias_for_mem_enc |
|
self.binarize_mask_from_pts_for_mem_enc = binarize_mask_from_pts_for_mem_enc |
|
self.non_overlap_masks_for_mem_enc = non_overlap_masks_for_mem_enc |
|
self.memory_temporal_stride_for_eval = memory_temporal_stride_for_eval |
|
self.use_mask_input_as_output_without_sam = use_mask_input_as_output_without_sam |
|
self.multimask_output_in_sam = multimask_output_in_sam |
|
self.multimask_min_pt_num = multimask_min_pt_num |
|
self.multimask_max_pt_num = multimask_max_pt_num |
|
self.multimask_output_for_tracking = multimask_output_for_tracking |
|
self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr |
|
self.iou_prediction_use_sigmoid = iou_prediction_use_sigmoid |
|
self.image_size = image_size |
|
self.backbone_stride = backbone_stride |
|
self.sam_mask_decoder_extra_args = sam_mask_decoder_extra_args |
|
self.pred_obj_scores = pred_obj_scores |
|
self.pred_obj_scores_mlp = pred_obj_scores_mlp |
|
self.fixed_no_obj_ptr = fixed_no_obj_ptr |
|
self.soft_no_obj_ptr = soft_no_obj_ptr |
|
if self.fixed_no_obj_ptr: |
|
assert self.pred_obj_scores |
|
assert self.use_obj_ptrs_in_encoder |
|
if self.pred_obj_scores and self.use_obj_ptrs_in_encoder: |
|
self.no_obj_ptr = torch.nn.Parameter(torch.zeros(1, self.hidden_dim)) |
|
trunc_normal_(self.no_obj_ptr, std=0.02) |
|
self.use_mlp_for_obj_ptr_proj = use_mlp_for_obj_ptr_proj |
|
self._build_sam_heads() |
|
self.add_all_frames_to_correct_as_cond = add_all_frames_to_correct_as_cond |
|
self.max_cond_frames_in_attn = max_cond_frames_in_attn |
|
if compile_image_encoder: |
|
print('Image encoder compilation is enabled. First forward pass will be slow.') |
|
self.image_encoder.forward = torch.compile(self.image_encoder.forward, mode='max-autotune', fullgraph=True, dynamic=False) |
|
|
|
@property |
|
def device(self): |
|
return next(self.parameters()).device |
|
|
|
def forward(self, *args, **kwargs): |
|
        raise NotImplementedError('Please use the corresponding methods in SAM2VideoPredictor for inference. See notebooks/video_predictor_example.ipynb for an example.')
|
|
|
def _build_sam_heads(self): |
|
self.sam_prompt_embed_dim = self.hidden_dim |
|
self.sam_image_embedding_size = self.image_size // self.backbone_stride |
|
self.sam_prompt_encoder = PromptEncoder(embed_dim=self.sam_prompt_embed_dim, image_embedding_size=(self.sam_image_embedding_size, self.sam_image_embedding_size), input_image_size=(self.image_size, self.image_size), mask_in_chans=16) |
|
        self.sam_mask_decoder = MaskDecoder(
            num_multimask_outputs=3,
            transformer=TwoWayTransformer(
                depth=2,
                embedding_dim=self.sam_prompt_embed_dim,
                mlp_dim=2048,
                num_heads=8,
            ),
            transformer_dim=self.sam_prompt_embed_dim,
            iou_head_depth=3,
            iou_head_hidden_dim=256,
            use_high_res_features=self.use_high_res_features_in_sam,
            iou_prediction_use_sigmoid=self.iou_prediction_use_sigmoid,
            pred_obj_scores=self.pred_obj_scores,
            pred_obj_scores_mlp=self.pred_obj_scores_mlp,
            use_multimask_token_for_obj_ptr=self.use_multimask_token_for_obj_ptr,
            **(self.sam_mask_decoder_extra_args or {}),
        )
|
if self.use_obj_ptrs_in_encoder: |
|
self.obj_ptr_proj = torch.nn.Linear(self.hidden_dim, self.hidden_dim) |
|
if self.use_mlp_for_obj_ptr_proj: |
|
self.obj_ptr_proj = MLP(self.hidden_dim, self.hidden_dim, self.hidden_dim, 3) |
|
else: |
|
self.obj_ptr_proj = torch.nn.Identity() |
|
if self.proj_tpos_enc_in_obj_ptrs: |
|
self.obj_ptr_tpos_proj = torch.nn.Linear(self.hidden_dim, self.mem_dim) |
|
else: |
|
self.obj_ptr_tpos_proj = torch.nn.Identity() |
|
|
|
def _forward_sam_heads(self, backbone_features, point_inputs=None, mask_inputs=None, high_res_features=None, multimask_output=False): |
|
B = backbone_features.size(0) |
|
device = backbone_features.device |
|
assert backbone_features.size(1) == self.sam_prompt_embed_dim |
|
assert backbone_features.size(2) == self.sam_image_embedding_size |
|
assert backbone_features.size(3) == self.sam_image_embedding_size |
|
if point_inputs is not None: |
|
sam_point_coords = point_inputs['point_coords'] |
|
sam_point_labels = point_inputs['point_labels'] |
|
assert sam_point_coords.size(0) == B and sam_point_labels.size(0) == B |
|
else: |
|
sam_point_coords = torch.zeros(B, 1, 2, device=device) |
|
sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device) |
|
if mask_inputs is not None: |
|
assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1) |
|
if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size: |
|
sam_mask_prompt = F.interpolate(mask_inputs.float(), size=self.sam_prompt_encoder.mask_input_size, align_corners=False, mode='bilinear', antialias=True) |
|
else: |
|
sam_mask_prompt = mask_inputs |
|
else: |
|
sam_mask_prompt = None |
|
(sparse_embeddings, dense_embeddings) = self.sam_prompt_encoder(points=(sam_point_coords, sam_point_labels), boxes=None, masks=sam_mask_prompt) |
|
(low_res_multimasks, ious, sam_output_tokens, object_score_logits) = self.sam_mask_decoder(image_embeddings=backbone_features, image_pe=self.sam_prompt_encoder.get_dense_pe(), sparse_prompt_embeddings=sparse_embeddings, dense_prompt_embeddings=dense_embeddings, multimask_output=multimask_output, repeat_image=False, high_res_features=high_res_features) |
|
if self.pred_obj_scores: |
|
is_obj_appearing = object_score_logits > 0 |
|
low_res_multimasks = torch.where(is_obj_appearing[:, None, None], low_res_multimasks, NO_OBJ_SCORE) |
|
low_res_multimasks = low_res_multimasks.float() |
|
high_res_multimasks = F.interpolate(low_res_multimasks, size=(self.image_size, self.image_size), mode='bilinear', align_corners=False) |
|
sam_output_token = sam_output_tokens[:, 0] |
|
if multimask_output: |
|
best_iou_inds = torch.argmax(ious, dim=-1) |
|
batch_inds = torch.arange(B, device=device) |
|
low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) |
|
high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1) |
|
if sam_output_tokens.size(1) > 1: |
|
sam_output_token = sam_output_tokens[batch_inds, best_iou_inds] |
|
else: |
|
(low_res_masks, high_res_masks) = (low_res_multimasks, high_res_multimasks) |
|
obj_ptr = self.obj_ptr_proj(sam_output_token) |
|
if self.pred_obj_scores: |
|
if self.soft_no_obj_ptr: |
|
assert not self.teacher_force_obj_scores_for_mem |
|
lambda_is_obj_appearing = object_score_logits.sigmoid() |
|
else: |
|
lambda_is_obj_appearing = is_obj_appearing.float() |
|
if self.fixed_no_obj_ptr: |
|
obj_ptr = lambda_is_obj_appearing * obj_ptr |
|
obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr |
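        # Returned tuple layout (consumed by SAM2Base.track_step and the predictors):
        # (all low-res mask logits, all high-res mask logits, per-mask IoU predictions,
        #  selected low-res mask, selected high-res mask, object pointer, object score
        #  logits); with multimask_output=True the "selected" entries are the best-IoU
        #  mask for each batch element.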
|
return (low_res_multimasks, high_res_multimasks, ious, low_res_masks, high_res_masks, obj_ptr, object_score_logits) |
|
|
|
def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs): |
|
(out_scale, out_bias) = (20.0, -10.0) |
|
mask_inputs_float = mask_inputs.float() |
|
high_res_masks = mask_inputs_float * out_scale + out_bias |
|
low_res_masks = F.interpolate(high_res_masks, size=(high_res_masks.size(-2) // 4, high_res_masks.size(-1) // 4), align_corners=False, mode='bilinear', antialias=True) |
|
ious = mask_inputs.new_ones(mask_inputs.size(0), 1).float() |
|
if not self.use_obj_ptrs_in_encoder: |
|
obj_ptr = torch.zeros(mask_inputs.size(0), self.hidden_dim, device=mask_inputs.device) |
|
else: |
|
(_, _, _, _, _, obj_ptr, _) = self._forward_sam_heads(backbone_features=backbone_features, mask_inputs=self.mask_downsample(mask_inputs_float), high_res_features=high_res_features) |
|
is_obj_appearing = torch.any(mask_inputs.flatten(1).float() > 0.0, dim=1) |
|
is_obj_appearing = is_obj_appearing[..., None] |
|
lambda_is_obj_appearing = is_obj_appearing.float() |
|
object_score_logits = out_scale * lambda_is_obj_appearing + out_bias |
|
if self.pred_obj_scores: |
|
if self.fixed_no_obj_ptr: |
|
obj_ptr = lambda_is_obj_appearing * obj_ptr |
|
obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr |
|
return (low_res_masks, high_res_masks, ious, low_res_masks, high_res_masks, obj_ptr, object_score_logits) |
|
|
|
def forward_image(self, img_batch: torch.Tensor): |
|
backbone_out = self.image_encoder(img_batch) |
|
if self.use_high_res_features_in_sam: |
|
backbone_out['backbone_fpn'][0] = self.sam_mask_decoder.conv_s0(backbone_out['backbone_fpn'][0]) |
|
backbone_out['backbone_fpn'][1] = self.sam_mask_decoder.conv_s1(backbone_out['backbone_fpn'][1]) |
|
return backbone_out |
|
|
|
def _prepare_backbone_features(self, backbone_out): |
|
backbone_out = backbone_out.copy() |
|
assert len(backbone_out['backbone_fpn']) == len(backbone_out['vision_pos_enc']) |
|
assert len(backbone_out['backbone_fpn']) >= self.num_feature_levels |
|
feature_maps = backbone_out['backbone_fpn'][-self.num_feature_levels:] |
|
vision_pos_embeds = backbone_out['vision_pos_enc'][-self.num_feature_levels:] |
|
feat_sizes = [(x.shape[-2], x.shape[-1]) for x in vision_pos_embeds] |
|
vision_feats = [x.flatten(2).permute(2, 0, 1) for x in feature_maps] |
|
vision_pos_embeds = [x.flatten(2).permute(2, 0, 1) for x in vision_pos_embeds] |
|
return (backbone_out, vision_feats, vision_pos_embeds, feat_sizes) |
|
|
|
def _prepare_memory_conditioned_features(self, frame_idx, is_init_cond_frame, current_vision_feats, current_vision_pos_embeds, feat_sizes, output_dict, num_frames, track_in_reverse=False): |
|
B = current_vision_feats[-1].size(1) |
|
C = self.hidden_dim |
|
(H, W) = feat_sizes[-1] |
|
device = current_vision_feats[-1].device |
|
if self.num_maskmem == 0: |
|
pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W) |
|
return pix_feat |
|
num_obj_ptr_tokens = 0 |
|
if not is_init_cond_frame: |
|
(to_cat_memory, to_cat_memory_pos_embed) = ([], []) |
|
assert len(output_dict['cond_frame_outputs']) > 0 |
|
cond_outputs = output_dict['cond_frame_outputs'] |
|
(selected_cond_outputs, unselected_cond_outputs) = select_closest_cond_frames(frame_idx, cond_outputs, self.max_cond_frames_in_attn) |
|
t_pos_and_prevs = [(0, out) for out in selected_cond_outputs.values()] |
|
r = self.memory_temporal_stride_for_eval |
|
for t_pos in range(1, self.num_maskmem): |
|
t_rel = self.num_maskmem - t_pos |
|
if t_rel == 1: |
|
if not track_in_reverse: |
|
prev_frame_idx = frame_idx - t_rel |
|
else: |
|
prev_frame_idx = frame_idx + t_rel |
|
elif not track_in_reverse: |
|
prev_frame_idx = (frame_idx - 2) // r * r |
|
prev_frame_idx = prev_frame_idx - (t_rel - 2) * r |
|
else: |
|
prev_frame_idx = -(-(frame_idx + 2) // r) * r |
|
prev_frame_idx = prev_frame_idx + (t_rel - 2) * r |
|
out = output_dict['non_cond_frame_outputs'].get(prev_frame_idx, None) |
|
if out is None: |
|
out = unselected_cond_outputs.get(prev_frame_idx, None) |
|
t_pos_and_prevs.append((t_pos, out)) |
|
for (t_pos, prev) in t_pos_and_prevs: |
|
if prev is None: |
|
continue |
|
feats = prev['maskmem_features'].to(device, non_blocking=True) |
|
to_cat_memory.append(feats.flatten(2).permute(2, 0, 1)) |
|
maskmem_enc = prev['maskmem_pos_enc'][-1].to(device) |
|
maskmem_enc = maskmem_enc.flatten(2).permute(2, 0, 1) |
|
maskmem_enc = maskmem_enc + self.maskmem_tpos_enc[self.num_maskmem - t_pos - 1] |
|
to_cat_memory_pos_embed.append(maskmem_enc) |
|
if self.use_obj_ptrs_in_encoder: |
|
max_obj_ptrs_in_encoder = min(num_frames, self.max_obj_ptrs_in_encoder) |
|
if not self.training and self.only_obj_ptrs_in_the_past_for_eval: |
|
ptr_cond_outputs = {t: out for (t, out) in selected_cond_outputs.items() if (t >= frame_idx if track_in_reverse else t <= frame_idx)} |
|
else: |
|
ptr_cond_outputs = selected_cond_outputs |
|
pos_and_ptrs = [(abs(frame_idx - t), out['obj_ptr']) for (t, out) in ptr_cond_outputs.items()] |
|
for t_diff in range(1, max_obj_ptrs_in_encoder): |
|
t = frame_idx + t_diff if track_in_reverse else frame_idx - t_diff |
|
if t < 0 or (num_frames is not None and t >= num_frames): |
|
break |
|
out = output_dict['non_cond_frame_outputs'].get(t, unselected_cond_outputs.get(t, None)) |
|
if out is not None: |
|
pos_and_ptrs.append((t_diff, out['obj_ptr'])) |
|
if len(pos_and_ptrs) > 0: |
|
(pos_list, ptrs_list) = zip(*pos_and_ptrs) |
|
obj_ptrs = torch.stack(ptrs_list, dim=0) |
|
if self.add_tpos_enc_to_obj_ptrs: |
|
t_diff_max = max_obj_ptrs_in_encoder - 1 |
|
tpos_dim = C if self.proj_tpos_enc_in_obj_ptrs else self.mem_dim |
|
obj_pos = torch.tensor(pos_list, device=device) |
|
obj_pos = get_1d_sine_pe(obj_pos / t_diff_max, dim=tpos_dim) |
|
obj_pos = self.obj_ptr_tpos_proj(obj_pos) |
|
obj_pos = obj_pos.unsqueeze(1).expand(-1, B, self.mem_dim) |
|
else: |
|
obj_pos = obj_ptrs.new_zeros(len(pos_list), B, self.mem_dim) |
|
if self.mem_dim < C: |
|
obj_ptrs = obj_ptrs.reshape(-1, B, C // self.mem_dim, self.mem_dim) |
|
obj_ptrs = obj_ptrs.permute(0, 2, 1, 3).flatten(0, 1) |
|
obj_pos = obj_pos.repeat_interleave(C // self.mem_dim, dim=0) |
|
to_cat_memory.append(obj_ptrs) |
|
to_cat_memory_pos_embed.append(obj_pos) |
|
num_obj_ptr_tokens = obj_ptrs.shape[0] |
|
else: |
|
num_obj_ptr_tokens = 0 |
|
else: |
|
if self.directly_add_no_mem_embed: |
|
pix_feat_with_mem = current_vision_feats[-1] + self.no_mem_embed |
|
pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W) |
|
return pix_feat_with_mem |
|
to_cat_memory = [self.no_mem_embed.expand(1, B, self.mem_dim)] |
|
to_cat_memory_pos_embed = [self.no_mem_pos_enc.expand(1, B, self.mem_dim)] |
|
memory = torch.cat(to_cat_memory, dim=0) |
|
memory_pos_embed = torch.cat(to_cat_memory_pos_embed, dim=0) |
|
pix_feat_with_mem = self.memory_attention(curr=current_vision_feats, curr_pos=current_vision_pos_embeds, memory=memory, memory_pos=memory_pos_embed, num_obj_ptr_tokens=num_obj_ptr_tokens) |
|
pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W) |
|
return pix_feat_with_mem |
|
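    # Worked example for the non-conditioning memory schedule above (illustrative, not
    # part of the upstream source): with num_maskmem=7, memory_temporal_stride_for_eval
    # r=2 and frame_idx=10 tracked forward, t_rel runs 6..1 and prev_frame_idx becomes
    # 0, 2, 4, 6, 8 (stride-aligned older frames) and 9 (the immediately preceding
    # frame), so the memory bank mixes one dense recent frame with strided history.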
|
|
def _encode_new_memory(self, current_vision_feats, feat_sizes, pred_masks_high_res, is_mask_from_pts): |
|
B = current_vision_feats[-1].size(1) |
|
C = self.hidden_dim |
|
(H, W) = feat_sizes[-1] |
|
pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W) |
|
if self.non_overlap_masks_for_mem_enc and (not self.training): |
|
pred_masks_high_res = self._apply_non_overlapping_constraints(pred_masks_high_res) |
|
binarize = self.binarize_mask_from_pts_for_mem_enc and is_mask_from_pts |
|
if binarize and (not self.training): |
|
mask_for_mem = (pred_masks_high_res > 0).float() |
|
else: |
|
mask_for_mem = torch.sigmoid(pred_masks_high_res) |
|
if self.sigmoid_scale_for_mem_enc != 1.0: |
|
mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc |
|
if self.sigmoid_bias_for_mem_enc != 0.0: |
|
mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc |
|
maskmem_out = self.memory_encoder(pix_feat, mask_for_mem, skip_mask_sigmoid=True) |
|
maskmem_features = maskmem_out['vision_features'] |
|
maskmem_pos_enc = maskmem_out['vision_pos_enc'] |
|
return (maskmem_features, maskmem_pos_enc) |
|
|
|
def track_step(self, frame_idx, is_init_cond_frame, current_vision_feats, current_vision_pos_embeds, feat_sizes, point_inputs, mask_inputs, output_dict, num_frames, track_in_reverse=False, run_mem_encoder=True, prev_sam_mask_logits=None): |
|
current_out = {'point_inputs': point_inputs, 'mask_inputs': mask_inputs} |
|
if len(current_vision_feats) > 1: |
|
high_res_features = [x.permute(1, 2, 0).view(x.size(1), x.size(2), *s) for (x, s) in zip(current_vision_feats[:-1], feat_sizes[:-1])] |
|
else: |
|
high_res_features = None |
|
if mask_inputs is not None and self.use_mask_input_as_output_without_sam: |
|
pix_feat = current_vision_feats[-1].permute(1, 2, 0) |
|
pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1]) |
|
sam_outputs = self._use_mask_as_output(pix_feat, high_res_features, mask_inputs) |
|
else: |
|
pix_feat_with_mem = self._prepare_memory_conditioned_features(frame_idx=frame_idx, is_init_cond_frame=is_init_cond_frame, current_vision_feats=current_vision_feats[-1:], current_vision_pos_embeds=current_vision_pos_embeds[-1:], feat_sizes=feat_sizes[-1:], output_dict=output_dict, num_frames=num_frames, track_in_reverse=track_in_reverse) |
|
if prev_sam_mask_logits is not None: |
|
assert point_inputs is not None and mask_inputs is None |
|
mask_inputs = prev_sam_mask_logits |
|
multimask_output = self._use_multimask(is_init_cond_frame, point_inputs) |
|
sam_outputs = self._forward_sam_heads(backbone_features=pix_feat_with_mem, point_inputs=point_inputs, mask_inputs=mask_inputs, high_res_features=high_res_features, multimask_output=multimask_output) |
|
(_, _, _, low_res_masks, high_res_masks, obj_ptr, _) = sam_outputs |
|
current_out['pred_masks'] = low_res_masks |
|
current_out['pred_masks_high_res'] = high_res_masks |
|
current_out['obj_ptr'] = obj_ptr |
|
if run_mem_encoder and self.num_maskmem > 0: |
|
high_res_masks_for_mem_enc = high_res_masks |
|
(maskmem_features, maskmem_pos_enc) = self._encode_new_memory(current_vision_feats=current_vision_feats, feat_sizes=feat_sizes, pred_masks_high_res=high_res_masks_for_mem_enc, is_mask_from_pts=point_inputs is not None) |
|
current_out['maskmem_features'] = maskmem_features |
|
current_out['maskmem_pos_enc'] = maskmem_pos_enc |
|
else: |
|
current_out['maskmem_features'] = None |
|
current_out['maskmem_pos_enc'] = None |
|
return current_out |
|
|
|
def _use_multimask(self, is_init_cond_frame, point_inputs): |
|
num_pts = 0 if point_inputs is None else point_inputs['point_labels'].size(1) |
|
multimask_output = self.multimask_output_in_sam and (is_init_cond_frame or self.multimask_output_for_tracking) and (self.multimask_min_pt_num <= num_pts <= self.multimask_max_pt_num) |
|
return multimask_output |
|
|
|
def _apply_non_overlapping_constraints(self, pred_masks): |
|
batch_size = pred_masks.size(0) |
|
if batch_size == 1: |
|
return pred_masks |
|
device = pred_masks.device |
|
max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True) |
|
batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None] |
|
keep = max_obj_inds == batch_obj_inds |
|
pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0)) |
|
return pred_masks |
|
|
|
# File: segment-anything-2-coreml-conversion/sam2/modeling/sam2_utils.py |
|
import copy |
|
import torch |
|
import torch.nn as nn |
|
import torch.nn.functional as F |
|
|
|
def select_closest_cond_frames(frame_idx, cond_frame_outputs, max_cond_frame_num): |
|
if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num: |
|
selected_outputs = cond_frame_outputs |
|
unselected_outputs = {} |
|
else: |
|
assert max_cond_frame_num >= 2, 'we should allow using 2+ conditioning frames' |
|
selected_outputs = {} |
|
idx_before = max((t for t in cond_frame_outputs if t < frame_idx), default=None) |
|
if idx_before is not None: |
|
selected_outputs[idx_before] = cond_frame_outputs[idx_before] |
|
idx_after = min((t for t in cond_frame_outputs if t >= frame_idx), default=None) |
|
if idx_after is not None: |
|
selected_outputs[idx_after] = cond_frame_outputs[idx_after] |
|
num_remain = max_cond_frame_num - len(selected_outputs) |
|
inds_remain = sorted((t for t in cond_frame_outputs if t not in selected_outputs), key=lambda x: abs(x - frame_idx))[:num_remain] |
|
selected_outputs.update(((t, cond_frame_outputs[t]) for t in inds_remain)) |
|
unselected_outputs = {t: v for (t, v) in cond_frame_outputs.items() if t not in selected_outputs} |
|
return (selected_outputs, unselected_outputs) |
|
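# Worked example (illustrative, not part of the upstream SAM2 source): with conditioning
# outputs on frames {0, 5, 20, 40}, a query at frame 18 and max_cond_frame_num=3, the
# selection keeps the closest frame before (5), the closest frame at/after (20), and the
# nearest remaining frame by absolute distance (0); frame 40 stays unselected.
def _select_closest_cond_frames_sketch():
    cond_outputs = {0: 'out0', 5: 'out5', 20: 'out20', 40: 'out40'}
    selected, unselected = select_closest_cond_frames(
        frame_idx=18, cond_frame_outputs=cond_outputs, max_cond_frame_num=3)
    # sorted(selected)   -> [0, 5, 20]
    # sorted(unselected) -> [40]
    return selected, unselected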
|
|
def get_1d_sine_pe(pos_inds, dim, temperature=10000): |
|
pe_dim = dim // 2 |
|
dim_t = torch.arange(pe_dim, dtype=torch.float32, device=pos_inds.device) |
|
dim_t = temperature ** (2 * (dim_t // 2) / pe_dim) |
|
pos_embed = pos_inds.unsqueeze(-1) / dim_t |
|
pos_embed = torch.cat([pos_embed.sin(), pos_embed.cos()], dim=-1) |
|
return pos_embed |
|
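# Illustrative sketch (not part of the upstream SAM2 source): get_1d_sine_pe maps a
# vector of (possibly fractional) positions to sinusoidal embeddings of size `dim`
# (sines in the first half, cosines in the second), as used for the temporal encoding of
# object pointers in SAM2Base.
def _sine_pe_sketch():
    import torch
    pos = torch.tensor([0.0, 0.25, 1.0])
    pe = get_1d_sine_pe(pos, dim=64)
    # pe: (3, 64)
    return pe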
|
|
def get_activation_fn(activation): |
|
if activation == 'relu': |
|
return F.relu |
|
if activation == 'gelu': |
|
return F.gelu |
|
if activation == 'glu': |
|
return F.glu |
|
    raise RuntimeError(f'activation should be relu/gelu/glu, not {activation}.')
|
|
|
def get_clones(module, N): |
|
return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) |
|
|
|
class DropPath(nn.Module): |
|
|
|
def __init__(self, drop_prob=0.0, scale_by_keep=True): |
|
super(DropPath, self).__init__() |
|
self.drop_prob = drop_prob |
|
self.scale_by_keep = scale_by_keep |
|
|
|
def forward(self, x): |
|
if self.drop_prob == 0.0 or not self.training: |
|
return x |
|
keep_prob = 1 - self.drop_prob |
|
shape = (x.shape[0],) + (1,) * (x.ndim - 1) |
|
random_tensor = x.new_empty(shape).bernoulli_(keep_prob) |
|
if keep_prob > 0.0 and self.scale_by_keep: |
|
random_tensor.div_(keep_prob) |
|
return x * random_tensor |
|
|
|
class MLP(nn.Module): |
|
|
|
def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int, activation: nn.Module=nn.ReLU, sigmoid_output: bool=False) -> None: |
|
super().__init__() |
|
self.num_layers = num_layers |
|
h = [hidden_dim] * (num_layers - 1) |
|
self.layers = nn.ModuleList((nn.Linear(n, k) for (n, k) in zip([input_dim] + h, h + [output_dim]))) |
|
self.sigmoid_output = sigmoid_output |
|
self.act = activation() |
|
|
|
def forward(self, x): |
|
for (i, layer) in enumerate(self.layers): |
|
x = self.act(layer(x)) if i < self.num_layers - 1 else layer(x) |
|
if self.sigmoid_output: |
|
            x = torch.sigmoid(x)
|
return x |
|
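# Usage sketch (illustrative, not part of the upstream SAM2 source): a 3-layer MLP with
# the same dimensions as the mask-token hypernetworks in the mask decoder
# (256 -> 256 -> 256 -> 32); only the last layer is left without an activation unless
# sigmoid_output=True.
def _mlp_sketch():
    import torch
    mlp = MLP(input_dim=256, hidden_dim=256, output_dim=32, num_layers=3)
    x = torch.randn(4, 256)
    y = mlp(x)
    # y: (4, 32)
    return y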
|
|
class LayerNorm2d(nn.Module): |
|
|
|
def __init__(self, num_channels: int, eps: float=1e-06) -> None: |
|
super().__init__() |
|
self.weight = nn.Parameter(torch.ones(num_channels)) |
|
self.bias = nn.Parameter(torch.zeros(num_channels)) |
|
self.eps = eps |
|
|
|
def forward(self, x: torch.Tensor) -> torch.Tensor: |
|
u = x.mean(1, keepdim=True) |
|
s = (x - u).pow(2).mean(1, keepdim=True) |
|
x = (x - u) / torch.sqrt(s + self.eps) |
|
x = self.weight[:, None, None] * x + self.bias[:, None, None] |
|
return x |
|
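# Illustrative sketch (not part of the upstream SAM2 source): unlike nn.LayerNorm, this
# LayerNorm2d normalizes an NCHW tensor across the channel dimension at every spatial
# position before applying the learned per-channel affine parameters.
def _layernorm2d_sketch():
    import torch
    ln = LayerNorm2d(num_channels=32)
    x = torch.randn(2, 32, 8, 8)
    y = ln(x)
    # At initialization (weight=1, bias=0): y.mean(dim=1) ~ 0 and
    # y.var(dim=1, unbiased=False) ~ 1 for every (batch, h, w) position.
    return y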
|
|
# File: segment-anything-2-coreml-conversion/sam2/sam2_image_predictor.py |
|
import os |
|
import logging |
|
from typing import List, Optional, Tuple, Union |
|
import numpy as np |
|
import torch |
|
from PIL.Image import Image |
|
from sam2.modeling.sam2_base import SAM2Base |
|
from sam2.utils.transforms import SAM2Transforms |
|
|
|
class SAM2ImagePredictor: |
|
|
|
def __init__(self, sam_model: SAM2Base, mask_threshold=0.0, max_hole_area=0.0, max_sprinkle_area=0.0, **kwargs) -> None: |
|
super().__init__() |
|
self.model = sam_model |
|
self._transforms = SAM2Transforms(resolution=self.model.image_size, mask_threshold=mask_threshold, max_hole_area=max_hole_area, max_sprinkle_area=max_sprinkle_area) |
|
self._is_image_set = False |
|
self._features = None |
|
self._orig_hw = None |
|
self._is_batch = False |
|
self.mask_threshold = mask_threshold |
|
self._bb_feat_sizes = [(256, 256), (128, 128), (64, 64)] |
|
|
|
@classmethod |
|
def from_pretrained(cls, model_id: str, **kwargs) -> 'SAM2ImagePredictor': |
|
from sam2.build_sam import build_sam2_hf |
|
sam_model = build_sam2_hf(model_id, **kwargs) |
|
return cls(sam_model, **kwargs) |
|
|
|
@torch.no_grad() |
|
def set_image(self, image: Union[np.ndarray, Image]) -> None: |
|
self.reset_predictor() |
|
if isinstance(image, np.ndarray): |
|
logging.info('For numpy array image, we assume (HxWxC) format') |
|
self._orig_hw = [image.shape[:2]] |
|
elif isinstance(image, Image): |
|
(w, h) = image.size |
|
self._orig_hw = [(h, w)] |
|
else: |
|
raise NotImplementedError('Image format not supported') |
|
input_image = self._transforms(image) |
|
input_image = input_image[None, ...].to(self.device) |
|
assert len(input_image.shape) == 4 and input_image.shape[1] == 3, f'input_image must be of size 1x3xHxW, got {input_image.shape}' |
|
logging.info('Computing image embeddings for the provided image...') |
|
backbone_out = self.model.forward_image(input_image) |
|
(_, vision_feats, _, _) = self.model._prepare_backbone_features(backbone_out) |
|
if self.model.directly_add_no_mem_embed: |
|
vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed |
|
feats = [feat.permute(1, 2, 0).view(1, -1, *feat_size) for (feat, feat_size) in zip(vision_feats[::-1], self._bb_feat_sizes[::-1])][::-1] |
|
self._features = {'image_embed': feats[-1], 'high_res_feats': feats[:-1]} |
|
self._is_image_set = True |
|
serialize_ground = os.environ.get('SERIALIZE_GROUND', False) |
|
if serialize_ground: |
|
image_embed = self._features['image_embed'].cpu().numpy() |
|
high_res_feats = self._features['high_res_feats'] |
|
feats_s0 = high_res_feats[0].cpu().numpy() |
|
feats_s1 = high_res_feats[1].cpu().numpy() |
|
np.save('image_embed.npy', image_embed) |
|
np.save('feats_s0.npy', feats_s0) |
|
np.save('feats_s1.npy', feats_s1) |
|
logging.info('Image embeddings computed.') |
|
|
|
@torch.no_grad() |
|
def encode_image_raw(self, prepared_image: torch.Tensor): |
|
self.model.eval() |
|
with torch.no_grad(): |
|
for (_, param) in self.model.named_parameters(): |
|
if param.requires_grad: |
|
param.requires_grad = False |
|
backbone_out = self.model.forward_image(prepared_image) |
|
(_, vision_feats, _, _) = self.model._prepare_backbone_features(backbone_out) |
|
if self.model.directly_add_no_mem_embed: |
|
vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed |
|
feats = [feat.permute(1, 2, 0).view(1, -1, *feat_size) for (feat, feat_size) in zip(vision_feats[::-1], self._bb_feat_sizes[::-1])][::-1] |
|
image_embed = feats[-1] |
|
high_res_feats = feats[:-1] |
|
assert len(high_res_feats) == 2 |
|
(feats_s0, feats_s1) = (high_res_feats[0], high_res_feats[1]) |
|
return (image_embed, feats_s0, feats_s1) |
|
|
|
@torch.no_grad() |
|
def encode_points_raw(self, unnorm_coords: torch.Tensor, labels: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: |
|
concat_points = (unnorm_coords, labels) |
|
with torch.no_grad(): |
|
for (_, param) in self.model.named_parameters(): |
|
if param.requires_grad: |
|
param.requires_grad = False |
|
(sparse_embeddings, dense_embeddings) = self.model.sam_prompt_encoder.points_only(points=concat_points) |
|
return (sparse_embeddings, dense_embeddings) |
|
|
|
@torch.no_grad() |
|
def decode_masks_raw(self, image_embeddings: torch.Tensor, sparse_embedding: torch.Tensor, dense_embedding: torch.Tensor, high_res_features: List[torch.Tensor], multimask_output: bool=True, batched_mode: bool=False): |
|
with torch.no_grad(): |
|
for (_, param) in self.model.sam_mask_decoder.named_parameters(): |
|
if param.requires_grad: |
|
param.requires_grad = False |
|
(low_res_masks, iou_scores, _, _) = self.model.sam_mask_decoder(image_embeddings=image_embeddings, image_pe=self.model.sam_prompt_encoder.get_dense_pe(), sparse_prompt_embeddings=sparse_embedding, dense_prompt_embeddings=dense_embedding, multimask_output=multimask_output, repeat_image=batched_mode, high_res_features=high_res_features) |
|
return (low_res_masks, iou_scores) |
|
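    # Usage sketch for the raw encode/decode path above (illustrative, not part of the
    # upstream SAM2 source; the checkpoint id and tensor shapes are assumptions based on
    # the default 256-dim decoder and the (256, 128, 64) feature grid sizes):
    #
    #   predictor = SAM2ImagePredictor.from_pretrained("facebook/sam2-hiera-small")
    #   image_embed, feats_s0, feats_s1 = predictor.encode_image_raw(prepared_image)
    #   # image_embed: (1, 256, 64, 64), feats_s0: (1, 32, 256, 256), feats_s1: (1, 64, 128, 128)
    #   sparse, dense = predictor.encode_points_raw(unnorm_coords, labels)
    #   low_res_masks, iou_scores = predictor.decode_masks_raw(
    #       image_embed, sparse, dense, [feats_s0, feats_s1])
    #   # low_res_masks: (1, num_masks, 256, 256) logits; iou_scores: (1, num_masks)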
|
|
@torch.no_grad() |
|
    def set_image_batch(self, image_list: List[np.ndarray]) -> None:
|
self.reset_predictor() |
|
assert isinstance(image_list, list) |
|
self._orig_hw = [] |
|
for image in image_list: |
|
assert isinstance(image, np.ndarray), 'Images are expected to be an np.ndarray in RGB format, and of shape HWC' |
|
self._orig_hw.append(image.shape[:2]) |
|
img_batch = self._transforms.forward_batch(image_list) |
|
img_batch = img_batch.to(self.device) |
|
batch_size = img_batch.shape[0] |
|
assert len(img_batch.shape) == 4 and img_batch.shape[1] == 3, f'img_batch must be of size Bx3xHxW, got {img_batch.shape}' |
|
logging.info('Computing image embeddings for the provided images...') |
|
backbone_out = self.model.forward_image(img_batch) |
|
(_, vision_feats, _, _) = self.model._prepare_backbone_features(backbone_out) |
|
if self.model.directly_add_no_mem_embed: |
|
vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed |
|
feats = [feat.permute(1, 2, 0).view(batch_size, -1, *feat_size) for (feat, feat_size) in zip(vision_feats[::-1], self._bb_feat_sizes[::-1])][::-1] |
|
self._features = {'image_embed': feats[-1], 'high_res_feats': feats[:-1]} |
|
self._is_image_set = True |
|
self._is_batch = True |
|
logging.info('Image embeddings computed.') |
|
|
|
def predict_batch(self, point_coords_batch: List[np.ndarray]=None, point_labels_batch: List[np.ndarray]=None, box_batch: List[np.ndarray]=None, mask_input_batch: List[np.ndarray]=None, multimask_output: bool=True, return_logits: bool=False, normalize_coords=True) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]: |
|
assert self._is_batch, 'This function should only be used when in batched mode' |
|
if not self._is_image_set: |
|
raise RuntimeError('An image must be set with .set_image_batch(...) before mask prediction.') |
|
num_images = len(self._features['image_embed']) |
|
all_masks = [] |
|
all_ious = [] |
|
all_low_res_masks = [] |
|
for img_idx in range(num_images): |
|
point_coords = point_coords_batch[img_idx] if point_coords_batch is not None else None |
|
point_labels = point_labels_batch[img_idx] if point_labels_batch is not None else None |
|
box = box_batch[img_idx] if box_batch is not None else None |
|
mask_input = mask_input_batch[img_idx] if mask_input_batch is not None else None |
|
(mask_input, unnorm_coords, labels, unnorm_box) = self._prep_prompts(point_coords, point_labels, box, mask_input, normalize_coords, img_idx=img_idx) |
|
(masks, iou_predictions, low_res_masks) = self._predict(unnorm_coords, labels, unnorm_box, mask_input, multimask_output, return_logits=return_logits, img_idx=img_idx) |
|
masks_np = masks.squeeze(0).float().detach().cpu().numpy() |
|
iou_predictions_np = iou_predictions.squeeze(0).float().detach().cpu().numpy() |
|
low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy() |
|
all_masks.append(masks_np) |
|
all_ious.append(iou_predictions_np) |
|
all_low_res_masks.append(low_res_masks_np) |
|
return (all_masks, all_ious, all_low_res_masks) |
|
|
|
def predict(self, point_coords: Optional[np.ndarray]=None, point_labels: Optional[np.ndarray]=None, box: Optional[np.ndarray]=None, mask_input: Optional[np.ndarray]=None, multimask_output: bool=True, return_logits: bool=False, normalize_coords=True) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: |
|
if not self._is_image_set: |
|
raise RuntimeError('An image must be set with .set_image(...) before mask prediction.') |
|
(mask_input, unnorm_coords, labels, unnorm_box) = self._prep_prompts(point_coords, point_labels, box, mask_input, normalize_coords) |
|
(masks, iou_predictions, low_res_masks) = self._predict(unnorm_coords, labels, unnorm_box, mask_input, multimask_output, return_logits=return_logits) |
|
masks_np = masks.squeeze(0).float().detach().cpu().numpy() |
|
iou_predictions_np = iou_predictions.squeeze(0).float().detach().cpu().numpy() |
|
low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy() |
|
return (masks_np, iou_predictions_np, low_res_masks_np) |
|
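    # Usage sketch (illustrative, not part of the upstream SAM2 source; the point
    # coordinates are arbitrary):
    #
    #   predictor.set_image(rgb_array)                      # HxWx3 uint8 RGB image
    #   masks, ious, low_res = predictor.predict(
    #       point_coords=np.array([[500, 375]]),
    #       point_labels=np.array([1]),                     # 1 = foreground click
    #       multimask_output=True)
    #   # masks: (3, H, W) boolean, ious: (3,), low_res: (3, 256, 256) logits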
|
|
def _prep_prompts(self, point_coords, point_labels, box, mask_logits, normalize_coords, img_idx=-1): |
|
(unnorm_coords, labels, unnorm_box, mask_input) = (None, None, None, None) |
|
if point_coords is not None: |
|
assert point_labels is not None, 'point_labels must be supplied if point_coords is supplied.' |
|
point_coords = torch.as_tensor(point_coords, dtype=torch.float, device=self.device) |
|
unnorm_coords = self._transforms.transform_coords(point_coords, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx]) |
|
labels = torch.as_tensor(point_labels, dtype=torch.int, device=self.device) |
|
if len(unnorm_coords.shape) == 2: |
|
(unnorm_coords, labels) = (unnorm_coords[None, ...], labels[None, ...]) |
|
if box is not None: |
|
box = torch.as_tensor(box, dtype=torch.float, device=self.device) |
|
unnorm_box = self._transforms.transform_boxes(box, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx]) |
|
if mask_logits is not None: |
|
mask_input = torch.as_tensor(mask_logits, dtype=torch.float, device=self.device) |
|
if len(mask_input.shape) == 3: |
|
mask_input = mask_input[None, :, :, :] |
|
return (mask_input, unnorm_coords, labels, unnorm_box) |
|
|
|
@torch.no_grad() |
|
def _predict(self, point_coords: Optional[torch.Tensor], point_labels: Optional[torch.Tensor], boxes: Optional[torch.Tensor]=None, mask_input: Optional[torch.Tensor]=None, multimask_output: bool=True, return_logits: bool=False, img_idx: int=-1) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: |
|
if not self._is_image_set: |
|
raise RuntimeError('An image must be set with .set_image(...) before mask prediction.') |
|
if point_coords is not None: |
|
concat_points = (point_coords, point_labels) |
|
else: |
|
concat_points = None |
|
if boxes is not None: |
|
box_coords = boxes.reshape(-1, 2, 2) |
|
box_labels = torch.tensor([[2, 3]], dtype=torch.int, device=boxes.device) |
|
box_labels = box_labels.repeat(boxes.size(0), 1) |
|
if concat_points is not None: |
|
concat_coords = torch.cat([box_coords, concat_points[0]], dim=1) |
|
concat_labels = torch.cat([box_labels, concat_points[1]], dim=1) |
|
concat_points = (concat_coords, concat_labels) |
|
else: |
|
concat_points = (box_coords, box_labels) |
|
(sparse_embeddings, dense_embeddings) = self.model.sam_prompt_encoder(points=concat_points, boxes=None, masks=mask_input) |
|
batched_mode = concat_points is not None and concat_points[0].shape[0] > 1 |
|
high_res_features = [feat_level[img_idx].unsqueeze(0) for feat_level in self._features['high_res_feats']] |
|
(low_res_masks, iou_predictions, _, _) = self.model.sam_mask_decoder(image_embeddings=self._features['image_embed'][img_idx].unsqueeze(0), image_pe=self.model.sam_prompt_encoder.get_dense_pe(), sparse_prompt_embeddings=sparse_embeddings, dense_prompt_embeddings=dense_embeddings, multimask_output=multimask_output, repeat_image=batched_mode, high_res_features=high_res_features) |
|
if os.environ.get('SERIALIZE_GROUND', False): |
|
low_res_masks_np = low_res_masks.cpu().numpy() |
|
np.save('low_res_masks.npy', low_res_masks_np) |
|
masks = self._transforms.postprocess_masks(low_res_masks, self._orig_hw[img_idx]) |
|
low_res_masks = torch.clamp(low_res_masks, -32.0, 32.0) |
|
if not return_logits: |
|
masks = masks > self.mask_threshold |
|
return (masks, iou_predictions, low_res_masks) |
|
|
|
def get_image_embedding(self) -> torch.Tensor: |
|
if not self._is_image_set: |
|
raise RuntimeError('An image must be set with .set_image(...) to generate an embedding.') |
|
assert self._features is not None, 'Features must exist if an image has been set.' |
|
return self._features['image_embed'] |
|
|
|
@property |
|
def device(self) -> torch.device: |
|
return self.model.device |
|
|
|
def reset_predictor(self) -> None: |
|
self._is_image_set = False |
|
self._features = None |
|
self._orig_hw = None |
|
self._is_batch = False |
|
|
|
# File: segment-anything-2-coreml-conversion/sam2/sam2_video_predictor.py |
|
import warnings |
|
from collections import OrderedDict |
|
import torch |
|
from tqdm import tqdm |
|
from sam2.modeling.sam2_base import NO_OBJ_SCORE, SAM2Base |
|
from sam2.utils.misc import concat_points, fill_holes_in_mask_scores, load_video_frames |
|
|
|
class SAM2VideoPredictor(SAM2Base): |
|
|
|
def __init__(self, fill_hole_area=0, non_overlap_masks=False, clear_non_cond_mem_around_input=False, clear_non_cond_mem_for_multi_obj=False, **kwargs): |
|
super().__init__(**kwargs) |
|
self.fill_hole_area = fill_hole_area |
|
self.non_overlap_masks = non_overlap_masks |
|
self.clear_non_cond_mem_around_input = clear_non_cond_mem_around_input |
|
self.clear_non_cond_mem_for_multi_obj = clear_non_cond_mem_for_multi_obj |
|
|
|
@torch.inference_mode() |
|
def init_state(self, video_path, offload_video_to_cpu=False, offload_state_to_cpu=False, async_loading_frames=False): |
|
compute_device = self.device |
|
(images, video_height, video_width) = load_video_frames(video_path=video_path, image_size=self.image_size, offload_video_to_cpu=offload_video_to_cpu, async_loading_frames=async_loading_frames, compute_device=compute_device) |
|
inference_state = {} |
|
inference_state['images'] = images |
|
inference_state['num_frames'] = len(images) |
|
inference_state['offload_video_to_cpu'] = offload_video_to_cpu |
|
inference_state['offload_state_to_cpu'] = offload_state_to_cpu |
|
inference_state['video_height'] = video_height |
|
inference_state['video_width'] = video_width |
|
inference_state['device'] = compute_device |
|
if offload_state_to_cpu: |
|
inference_state['storage_device'] = torch.device('cpu') |
|
else: |
|
inference_state['storage_device'] = compute_device |
|
inference_state['point_inputs_per_obj'] = {} |
|
inference_state['mask_inputs_per_obj'] = {} |
|
inference_state['cached_features'] = {} |
|
inference_state['constants'] = {} |
|
inference_state['obj_id_to_idx'] = OrderedDict() |
|
inference_state['obj_idx_to_id'] = OrderedDict() |
|
inference_state['obj_ids'] = [] |
|
inference_state['output_dict'] = {'cond_frame_outputs': {}, 'non_cond_frame_outputs': {}} |
|
inference_state['output_dict_per_obj'] = {} |
|
inference_state['temp_output_dict_per_obj'] = {} |
|
inference_state['consolidated_frame_inds'] = {'cond_frame_outputs': set(), 'non_cond_frame_outputs': set()} |
|
inference_state['tracking_has_started'] = False |
|
inference_state['frames_already_tracked'] = {} |
|
self._get_image_feature(inference_state, frame_idx=0, batch_size=1) |
|
return inference_state |
|
|
|
@classmethod |
|
def from_pretrained(cls, model_id: str, **kwargs) -> 'SAM2VideoPredictor': |
|
from sam2.build_sam import build_sam2_video_predictor_hf |
|
sam_model = build_sam2_video_predictor_hf(model_id, **kwargs) |
|
return sam_model |
|
|
|
def _obj_id_to_idx(self, inference_state, obj_id): |
|
obj_idx = inference_state['obj_id_to_idx'].get(obj_id, None) |
|
if obj_idx is not None: |
|
return obj_idx |
|
allow_new_object = not inference_state['tracking_has_started'] |
|
if allow_new_object: |
|
obj_idx = len(inference_state['obj_id_to_idx']) |
|
inference_state['obj_id_to_idx'][obj_id] = obj_idx |
|
inference_state['obj_idx_to_id'][obj_idx] = obj_id |
|
inference_state['obj_ids'] = list(inference_state['obj_id_to_idx']) |
|
inference_state['point_inputs_per_obj'][obj_idx] = {} |
|
inference_state['mask_inputs_per_obj'][obj_idx] = {} |
|
inference_state['output_dict_per_obj'][obj_idx] = {'cond_frame_outputs': {}, 'non_cond_frame_outputs': {}} |
|
inference_state['temp_output_dict_per_obj'][obj_idx] = {'cond_frame_outputs': {}, 'non_cond_frame_outputs': {}} |
|
return obj_idx |
|
else: |
|
raise RuntimeError(f"Cannot add new object id {obj_id} after tracking starts. All existing object ids: {inference_state['obj_ids']}. Please call 'reset_state' to restart from scratch.") |
|
|
|
def _obj_idx_to_id(self, inference_state, obj_idx): |
|
return inference_state['obj_idx_to_id'][obj_idx] |
|
|
|
def _get_obj_num(self, inference_state): |
|
return len(inference_state['obj_idx_to_id']) |
|
|
|
@torch.inference_mode() |
|
def add_new_points_or_box(self, inference_state, frame_idx, obj_id, points=None, labels=None, clear_old_points=True, normalize_coords=True, box=None): |
|
obj_idx = self._obj_id_to_idx(inference_state, obj_id) |
|
point_inputs_per_frame = inference_state['point_inputs_per_obj'][obj_idx] |
|
mask_inputs_per_frame = inference_state['mask_inputs_per_obj'][obj_idx] |
|
if (points is not None) != (labels is not None): |
|
raise ValueError('points and labels must be provided together') |
|
if points is None and box is None: |
|
raise ValueError('at least one of points or box must be provided as input') |
|
if points is None: |
|
points = torch.zeros(0, 2, dtype=torch.float32) |
|
elif not isinstance(points, torch.Tensor): |
|
points = torch.tensor(points, dtype=torch.float32) |
|
if labels is None: |
|
labels = torch.zeros(0, dtype=torch.int32) |
|
elif not isinstance(labels, torch.Tensor): |
|
labels = torch.tensor(labels, dtype=torch.int32) |
|
if points.dim() == 2: |
|
points = points.unsqueeze(0) |
|
if labels.dim() == 1: |
|
labels = labels.unsqueeze(0) |
|
if box is not None: |
|
if not clear_old_points: |
|
raise ValueError('cannot add box without clearing old points, since a box prompt must be provided before any point prompt (please use clear_old_points=True instead)')
|
if inference_state['tracking_has_started']: |
|
warnings.warn("You are adding a box after tracking starts. SAM 2 may not always be able to incorporate a box prompt for *refinement*. If you intend to use box prompt as an *initial* input before tracking, please call 'reset_state' on the inference state to restart from scratch.", category=UserWarning, stacklevel=2) |
|
if not isinstance(box, torch.Tensor): |
|
box = torch.tensor(box, dtype=torch.float32, device=points.device) |
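
# A box is encoded as its two corner points with the reserved labels 2 and 3 and
# prepended to any user-provided point prompts.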
|
box_coords = box.reshape(1, 2, 2) |
|
box_labels = torch.tensor([2, 3], dtype=torch.int32, device=labels.device) |
|
box_labels = box_labels.reshape(1, 2) |
|
points = torch.cat([box_coords, points], dim=1) |
|
labels = torch.cat([box_labels, labels], dim=1) |
|
if normalize_coords: |
|
video_H = inference_state['video_height'] |
|
video_W = inference_state['video_width'] |
|
points = points / torch.tensor([video_W, video_H]).to(points.device) |
|
points = points * self.image_size |
|
points = points.to(inference_state['device']) |
|
labels = labels.to(inference_state['device']) |
|
if not clear_old_points: |
|
point_inputs = point_inputs_per_frame.get(frame_idx, None) |
|
else: |
|
point_inputs = None |
|
point_inputs = concat_points(point_inputs, points, labels) |
|
point_inputs_per_frame[frame_idx] = point_inputs |
|
mask_inputs_per_frame.pop(frame_idx, None) |
|
is_init_cond_frame = frame_idx not in inference_state['frames_already_tracked'] |
|
if is_init_cond_frame: |
|
reverse = False |
|
else: |
|
reverse = inference_state['frames_already_tracked'][frame_idx]['reverse'] |
|
obj_output_dict = inference_state['output_dict_per_obj'][obj_idx] |
|
obj_temp_output_dict = inference_state['temp_output_dict_per_obj'][obj_idx] |
|
is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond |
|
storage_key = 'cond_frame_outputs' if is_cond else 'non_cond_frame_outputs' |
|
prev_sam_mask_logits = None |
|
prev_out = obj_temp_output_dict[storage_key].get(frame_idx) |
|
if prev_out is None: |
|
prev_out = obj_output_dict['cond_frame_outputs'].get(frame_idx) |
|
if prev_out is None: |
|
prev_out = obj_output_dict['non_cond_frame_outputs'].get(frame_idx) |
|
if prev_out is not None and prev_out['pred_masks'] is not None: |
|
device = inference_state['device'] |
|
prev_sam_mask_logits = prev_out['pred_masks'].to(device, non_blocking=True) |
|
prev_sam_mask_logits = torch.clamp(prev_sam_mask_logits, -32.0, 32.0) |
|
(current_out, _) = self._run_single_frame_inference(inference_state=inference_state, output_dict=obj_output_dict, frame_idx=frame_idx, batch_size=1, is_init_cond_frame=is_init_cond_frame, point_inputs=point_inputs, mask_inputs=None, reverse=reverse, run_mem_encoder=False, prev_sam_mask_logits=prev_sam_mask_logits) |
|
obj_temp_output_dict[storage_key][frame_idx] = current_out |
|
obj_ids = inference_state['obj_ids'] |
|
consolidated_out = self._consolidate_temp_output_across_obj(inference_state, frame_idx, is_cond=is_cond, run_mem_encoder=False, consolidate_at_video_res=True) |
|
(_, video_res_masks) = self._get_orig_video_res_output(inference_state, consolidated_out['pred_masks_video_res']) |
|
return (frame_idx, obj_ids, video_res_masks) |
|
|
|
def add_new_points(self, *args, **kwargs): |
|
return self.add_new_points_or_box(*args, **kwargs) |
|
|
|
@torch.inference_mode() |
|
def add_new_mask(self, inference_state, frame_idx, obj_id, mask): |
|
obj_idx = self._obj_id_to_idx(inference_state, obj_id) |
|
point_inputs_per_frame = inference_state['point_inputs_per_obj'][obj_idx] |
|
mask_inputs_per_frame = inference_state['mask_inputs_per_obj'][obj_idx] |
|
if not isinstance(mask, torch.Tensor): |
|
mask = torch.tensor(mask, dtype=torch.bool) |
|
assert mask.dim() == 2 |
|
(mask_H, mask_W) = mask.shape |
|
mask_inputs_orig = mask[None, None] |
|
mask_inputs_orig = mask_inputs_orig.float().to(inference_state['device']) |
|
if mask_H != self.image_size or mask_W != self.image_size: |
|
mask_inputs = torch.nn.functional.interpolate(mask_inputs_orig, size=(self.image_size, self.image_size), align_corners=False, mode='bilinear', antialias=True) |
|
mask_inputs = (mask_inputs >= 0.5).float() |
|
else: |
|
mask_inputs = mask_inputs_orig |
|
mask_inputs_per_frame[frame_idx] = mask_inputs |
|
point_inputs_per_frame.pop(frame_idx, None) |
|
is_init_cond_frame = frame_idx not in inference_state['frames_already_tracked'] |
|
if is_init_cond_frame: |
|
reverse = False |
|
else: |
|
reverse = inference_state['frames_already_tracked'][frame_idx]['reverse'] |
|
obj_output_dict = inference_state['output_dict_per_obj'][obj_idx] |
|
obj_temp_output_dict = inference_state['temp_output_dict_per_obj'][obj_idx] |
|
is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond |
|
storage_key = 'cond_frame_outputs' if is_cond else 'non_cond_frame_outputs' |
|
(current_out, _) = self._run_single_frame_inference(inference_state=inference_state, output_dict=obj_output_dict, frame_idx=frame_idx, batch_size=1, is_init_cond_frame=is_init_cond_frame, point_inputs=None, mask_inputs=mask_inputs, reverse=reverse, run_mem_encoder=False) |
|
obj_temp_output_dict[storage_key][frame_idx] = current_out |
|
obj_ids = inference_state['obj_ids'] |
|
consolidated_out = self._consolidate_temp_output_across_obj(inference_state, frame_idx, is_cond=is_cond, run_mem_encoder=False, consolidate_at_video_res=True) |
|
(_, video_res_masks) = self._get_orig_video_res_output(inference_state, consolidated_out['pred_masks_video_res']) |
|
return (frame_idx, obj_ids, video_res_masks) |
|
|
|
def _get_orig_video_res_output(self, inference_state, any_res_masks): |
|
device = inference_state['device'] |
|
video_H = inference_state['video_height'] |
|
video_W = inference_state['video_width'] |
|
any_res_masks = any_res_masks.to(device, non_blocking=True) |
|
if any_res_masks.shape[-2:] == (video_H, video_W): |
|
video_res_masks = any_res_masks |
|
else: |
|
video_res_masks = torch.nn.functional.interpolate(any_res_masks, size=(video_H, video_W), mode='bilinear', align_corners=False) |
|
if self.non_overlap_masks: |
|
video_res_masks = self._apply_non_overlapping_constraints(video_res_masks) |
|
return (any_res_masks, video_res_masks) |
|
|
|
def _consolidate_temp_output_across_obj(self, inference_state, frame_idx, is_cond, run_mem_encoder, consolidate_at_video_res=False): |
|
batch_size = self._get_obj_num(inference_state) |
|
storage_key = 'cond_frame_outputs' if is_cond else 'non_cond_frame_outputs' |
|
if consolidate_at_video_res: |
|
assert not run_mem_encoder, 'memory encoder cannot run at video resolution' |
|
consolidated_H = inference_state['video_height'] |
|
consolidated_W = inference_state['video_width'] |
|
consolidated_mask_key = 'pred_masks_video_res' |
|
else: |
|
consolidated_H = consolidated_W = self.image_size // 4 |
|
consolidated_mask_key = 'pred_masks' |
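
# Initialize the consolidated output with NO_OBJ_SCORE placeholders; objects without
# an output on this frame keep the placeholder, which the model treats as "no object".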
|
consolidated_out = {'maskmem_features': None, 'maskmem_pos_enc': None, consolidated_mask_key: torch.full(size=(batch_size, 1, consolidated_H, consolidated_W), fill_value=NO_OBJ_SCORE, dtype=torch.float32, device=inference_state['storage_device']), 'obj_ptr': torch.full(size=(batch_size, self.hidden_dim), fill_value=NO_OBJ_SCORE, dtype=torch.float32, device=inference_state['device'])} |
|
empty_mask_ptr = None |
|
for obj_idx in range(batch_size): |
|
obj_temp_output_dict = inference_state['temp_output_dict_per_obj'][obj_idx] |
|
obj_output_dict = inference_state['output_dict_per_obj'][obj_idx] |
|
out = obj_temp_output_dict[storage_key].get(frame_idx, None) |
|
if out is None: |
|
out = obj_output_dict['cond_frame_outputs'].get(frame_idx, None) |
|
if out is None: |
|
out = obj_output_dict['non_cond_frame_outputs'].get(frame_idx, None) |
|
if out is None: |
|
if run_mem_encoder: |
|
if empty_mask_ptr is None: |
|
empty_mask_ptr = self._get_empty_mask_ptr(inference_state, frame_idx) |
|
consolidated_out['obj_ptr'][obj_idx:obj_idx + 1] = empty_mask_ptr |
|
continue |
|
obj_mask = out['pred_masks'] |
|
consolidated_pred_masks = consolidated_out[consolidated_mask_key] |
|
if obj_mask.shape[-2:] == consolidated_pred_masks.shape[-2:]: |
|
consolidated_pred_masks[obj_idx:obj_idx + 1] = obj_mask |
|
else: |
|
resized_obj_mask = torch.nn.functional.interpolate(obj_mask, size=consolidated_pred_masks.shape[-2:], mode='bilinear', align_corners=False) |
|
consolidated_pred_masks[obj_idx:obj_idx + 1] = resized_obj_mask |
|
consolidated_out['obj_ptr'][obj_idx:obj_idx + 1] = out['obj_ptr'] |
|
if run_mem_encoder: |
|
device = inference_state['device'] |
|
high_res_masks = torch.nn.functional.interpolate(consolidated_out['pred_masks'].to(device, non_blocking=True), size=(self.image_size, self.image_size), mode='bilinear', align_corners=False) |
|
if self.non_overlap_masks_for_mem_enc: |
|
high_res_masks = self._apply_non_overlapping_constraints(high_res_masks) |
|
(maskmem_features, maskmem_pos_enc) = self._run_memory_encoder(inference_state=inference_state, frame_idx=frame_idx, batch_size=batch_size, high_res_masks=high_res_masks, is_mask_from_pts=True) |
|
consolidated_out['maskmem_features'] = maskmem_features |
|
consolidated_out['maskmem_pos_enc'] = maskmem_pos_enc |
|
return consolidated_out |
|
|
|
def _get_empty_mask_ptr(self, inference_state, frame_idx): |
|
batch_size = 1 |
|
mask_inputs = torch.zeros((batch_size, 1, self.image_size, self.image_size), dtype=torch.float32, device=inference_state['device']) |
|
(_, _, current_vision_feats, current_vision_pos_embeds, feat_sizes) = self._get_image_feature(inference_state, frame_idx, batch_size) |
|
current_out = self.track_step(frame_idx=frame_idx, is_init_cond_frame=True, current_vision_feats=current_vision_feats, current_vision_pos_embeds=current_vision_pos_embeds, feat_sizes=feat_sizes, point_inputs=None, mask_inputs=mask_inputs, output_dict={}, num_frames=inference_state['num_frames'], track_in_reverse=False, run_mem_encoder=False, prev_sam_mask_logits=None) |
|
return current_out['obj_ptr'] |
|
|
|
@torch.inference_mode() |
|
def propagate_in_video_preflight(self, inference_state): |
|
inference_state['tracking_has_started'] = True |
|
batch_size = self._get_obj_num(inference_state) |
|
temp_output_dict_per_obj = inference_state['temp_output_dict_per_obj'] |
|
output_dict = inference_state['output_dict'] |
|
consolidated_frame_inds = inference_state['consolidated_frame_inds'] |
|
for is_cond in [False, True]: |
|
storage_key = 'cond_frame_outputs' if is_cond else 'non_cond_frame_outputs' |
|
temp_frame_inds = set() |
|
for obj_temp_output_dict in temp_output_dict_per_obj.values(): |
|
temp_frame_inds.update(obj_temp_output_dict[storage_key].keys()) |
|
consolidated_frame_inds[storage_key].update(temp_frame_inds) |
|
for frame_idx in temp_frame_inds: |
|
consolidated_out = self._consolidate_temp_output_across_obj(inference_state, frame_idx, is_cond=is_cond, run_mem_encoder=True) |
|
output_dict[storage_key][frame_idx] = consolidated_out |
|
self._add_output_per_object(inference_state, frame_idx, consolidated_out, storage_key) |
|
clear_non_cond_mem = self.clear_non_cond_mem_around_input and (self.clear_non_cond_mem_for_multi_obj or batch_size <= 1) |
|
if clear_non_cond_mem: |
|
self._clear_non_cond_mem_around_input(inference_state, frame_idx) |
|
for obj_temp_output_dict in temp_output_dict_per_obj.values(): |
|
obj_temp_output_dict[storage_key].clear() |
|
for frame_idx in output_dict['cond_frame_outputs']: |
|
output_dict['non_cond_frame_outputs'].pop(frame_idx, None) |
|
for obj_output_dict in inference_state['output_dict_per_obj'].values(): |
|
for frame_idx in obj_output_dict['cond_frame_outputs']: |
|
obj_output_dict['non_cond_frame_outputs'].pop(frame_idx, None) |
|
for frame_idx in consolidated_frame_inds['cond_frame_outputs']: |
|
assert frame_idx in output_dict['cond_frame_outputs'] |
|
consolidated_frame_inds['non_cond_frame_outputs'].discard(frame_idx) |
|
all_consolidated_frame_inds = consolidated_frame_inds['cond_frame_outputs'] | consolidated_frame_inds['non_cond_frame_outputs'] |
|
input_frames_inds = set() |
|
for point_inputs_per_frame in inference_state['point_inputs_per_obj'].values(): |
|
input_frames_inds.update(point_inputs_per_frame.keys()) |
|
for mask_inputs_per_frame in inference_state['mask_inputs_per_obj'].values(): |
|
input_frames_inds.update(mask_inputs_per_frame.keys()) |
|
assert all_consolidated_frame_inds == input_frames_inds |
|
|
|
@torch.inference_mode() |
|
def propagate_in_video(self, inference_state, start_frame_idx=None, max_frame_num_to_track=None, reverse=False): |
|
self.propagate_in_video_preflight(inference_state) |
|
output_dict = inference_state['output_dict'] |
|
consolidated_frame_inds = inference_state['consolidated_frame_inds'] |
|
obj_ids = inference_state['obj_ids'] |
|
num_frames = inference_state['num_frames'] |
|
batch_size = self._get_obj_num(inference_state) |
|
if len(output_dict['cond_frame_outputs']) == 0: |
|
raise RuntimeError('No points are provided; please add points first') |
|
clear_non_cond_mem = self.clear_non_cond_mem_around_input and (self.clear_non_cond_mem_for_multi_obj or batch_size <= 1) |
|
if start_frame_idx is None: |
|
start_frame_idx = min(output_dict['cond_frame_outputs']) |
|
if max_frame_num_to_track is None: |
|
max_frame_num_to_track = num_frames |
|
if reverse: |
|
end_frame_idx = max(start_frame_idx - max_frame_num_to_track, 0) |
|
if start_frame_idx > 0: |
|
processing_order = range(start_frame_idx, end_frame_idx - 1, -1) |
|
else: |
|
processing_order = [] |
|
else: |
|
end_frame_idx = min(start_frame_idx + max_frame_num_to_track, num_frames - 1) |
|
processing_order = range(start_frame_idx, end_frame_idx + 1) |
|
for frame_idx in tqdm(processing_order, desc='propagate in video'): |
|
if frame_idx in consolidated_frame_inds['cond_frame_outputs']: |
|
storage_key = 'cond_frame_outputs' |
|
current_out = output_dict[storage_key][frame_idx] |
|
pred_masks = current_out['pred_masks'] |
|
if clear_non_cond_mem: |
|
self._clear_non_cond_mem_around_input(inference_state, frame_idx) |
|
elif frame_idx in consolidated_frame_inds['non_cond_frame_outputs']: |
|
storage_key = 'non_cond_frame_outputs' |
|
current_out = output_dict[storage_key][frame_idx] |
|
pred_masks = current_out['pred_masks'] |
|
else: |
|
storage_key = 'non_cond_frame_outputs' |
|
(current_out, pred_masks) = self._run_single_frame_inference(inference_state=inference_state, output_dict=output_dict, frame_idx=frame_idx, batch_size=batch_size, is_init_cond_frame=False, point_inputs=None, mask_inputs=None, reverse=reverse, run_mem_encoder=True) |
|
output_dict[storage_key][frame_idx] = current_out |
|
self._add_output_per_object(inference_state, frame_idx, current_out, storage_key) |
|
inference_state['frames_already_tracked'][frame_idx] = {'reverse': reverse} |
|
(_, video_res_masks) = self._get_orig_video_res_output(inference_state, pred_masks) |
|
yield (frame_idx, obj_ids, video_res_masks) |
|
|
|
def _add_output_per_object(self, inference_state, frame_idx, current_out, storage_key): |
|
maskmem_features = current_out['maskmem_features'] |
|
assert maskmem_features is None or isinstance(maskmem_features, torch.Tensor) |
|
maskmem_pos_enc = current_out['maskmem_pos_enc'] |
|
assert maskmem_pos_enc is None or isinstance(maskmem_pos_enc, list) |
|
output_dict_per_obj = inference_state['output_dict_per_obj'] |
|
for (obj_idx, obj_output_dict) in output_dict_per_obj.items(): |
|
obj_slice = slice(obj_idx, obj_idx + 1) |
|
obj_out = {'maskmem_features': None, 'maskmem_pos_enc': None, 'pred_masks': current_out['pred_masks'][obj_slice], 'obj_ptr': current_out['obj_ptr'][obj_slice]} |
|
if maskmem_features is not None: |
|
obj_out['maskmem_features'] = maskmem_features[obj_slice] |
|
if maskmem_pos_enc is not None: |
|
obj_out['maskmem_pos_enc'] = [x[obj_slice] for x in maskmem_pos_enc] |
|
obj_output_dict[storage_key][frame_idx] = obj_out |
|
|
|
@torch.inference_mode() |
|
def reset_state(self, inference_state): |
|
self._reset_tracking_results(inference_state) |
|
inference_state['obj_id_to_idx'].clear() |
|
inference_state['obj_idx_to_id'].clear() |
|
inference_state['obj_ids'].clear() |
|
inference_state['point_inputs_per_obj'].clear() |
|
inference_state['mask_inputs_per_obj'].clear() |
|
inference_state['output_dict_per_obj'].clear() |
|
inference_state['temp_output_dict_per_obj'].clear() |
|
|
|
def _reset_tracking_results(self, inference_state): |
|
for v in inference_state['point_inputs_per_obj'].values(): |
|
v.clear() |
|
for v in inference_state['mask_inputs_per_obj'].values(): |
|
v.clear() |
|
for v in inference_state['output_dict_per_obj'].values(): |
|
v['cond_frame_outputs'].clear() |
|
v['non_cond_frame_outputs'].clear() |
|
for v in inference_state['temp_output_dict_per_obj'].values(): |
|
v['cond_frame_outputs'].clear() |
|
v['non_cond_frame_outputs'].clear() |
|
inference_state['output_dict']['cond_frame_outputs'].clear() |
|
inference_state['output_dict']['non_cond_frame_outputs'].clear() |
|
inference_state['consolidated_frame_inds']['cond_frame_outputs'].clear() |
|
inference_state['consolidated_frame_inds']['non_cond_frame_outputs'].clear() |
|
inference_state['tracking_has_started'] = False |
|
inference_state['frames_already_tracked'].clear() |
|
|
|
def _get_image_feature(self, inference_state, frame_idx, batch_size): |
|
(image, backbone_out) = inference_state['cached_features'].get(frame_idx, (None, None)) |
|
if backbone_out is None: |
|
device = inference_state['device'] |
|
image = inference_state['images'][frame_idx].to(device).float().unsqueeze(0) |
|
backbone_out = self.forward_image(image) |
|
inference_state['cached_features'] = {frame_idx: (image, backbone_out)} |
|
expanded_image = image.expand(batch_size, -1, -1, -1) |
|
expanded_backbone_out = {'backbone_fpn': backbone_out['backbone_fpn'].copy(), 'vision_pos_enc': backbone_out['vision_pos_enc'].copy()} |
|
for (i, feat) in enumerate(expanded_backbone_out['backbone_fpn']): |
|
expanded_backbone_out['backbone_fpn'][i] = feat.expand(batch_size, -1, -1, -1) |
|
for (i, pos) in enumerate(expanded_backbone_out['vision_pos_enc']): |
|
pos = pos.expand(batch_size, -1, -1, -1) |
|
expanded_backbone_out['vision_pos_enc'][i] = pos |
|
features = self._prepare_backbone_features(expanded_backbone_out) |
|
features = (expanded_image,) + features |
|
return features |
|
|
|
def _run_single_frame_inference(self, inference_state, output_dict, frame_idx, batch_size, is_init_cond_frame, point_inputs, mask_inputs, reverse, run_mem_encoder, prev_sam_mask_logits=None): |
|
(_, _, current_vision_feats, current_vision_pos_embeds, feat_sizes) = self._get_image_feature(inference_state, frame_idx, batch_size) |
|
assert point_inputs is None or mask_inputs is None |
|
current_out = self.track_step(frame_idx=frame_idx, is_init_cond_frame=is_init_cond_frame, current_vision_feats=current_vision_feats, current_vision_pos_embeds=current_vision_pos_embeds, feat_sizes=feat_sizes, point_inputs=point_inputs, mask_inputs=mask_inputs, output_dict=output_dict, num_frames=inference_state['num_frames'], track_in_reverse=reverse, run_mem_encoder=run_mem_encoder, prev_sam_mask_logits=prev_sam_mask_logits) |
|
storage_device = inference_state['storage_device'] |
|
maskmem_features = current_out['maskmem_features'] |
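
# Cast memory features to bfloat16 and move them to the storage device (CPU when
# state offloading is enabled) to reduce memory usage.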
|
if maskmem_features is not None: |
|
maskmem_features = maskmem_features.to(torch.bfloat16) |
|
maskmem_features = maskmem_features.to(storage_device, non_blocking=True) |
|
pred_masks_gpu = current_out['pred_masks'] |
|
if self.fill_hole_area > 0: |
|
pred_masks_gpu = fill_holes_in_mask_scores(pred_masks_gpu, self.fill_hole_area) |
|
pred_masks = pred_masks_gpu.to(storage_device, non_blocking=True) |
|
maskmem_pos_enc = self._get_maskmem_pos_enc(inference_state, current_out) |
|
obj_ptr = current_out['obj_ptr'] |
|
compact_current_out = {'maskmem_features': maskmem_features, 'maskmem_pos_enc': maskmem_pos_enc, 'pred_masks': pred_masks, 'obj_ptr': obj_ptr} |
|
return (compact_current_out, pred_masks_gpu) |
|
|
|
def _run_memory_encoder(self, inference_state, frame_idx, batch_size, high_res_masks, is_mask_from_pts): |
|
(_, _, current_vision_feats, _, feat_sizes) = self._get_image_feature(inference_state, frame_idx, batch_size) |
|
(maskmem_features, maskmem_pos_enc) = self._encode_new_memory(current_vision_feats=current_vision_feats, feat_sizes=feat_sizes, pred_masks_high_res=high_res_masks, is_mask_from_pts=is_mask_from_pts) |
|
storage_device = inference_state['storage_device'] |
|
maskmem_features = maskmem_features.to(torch.bfloat16) |
|
maskmem_features = maskmem_features.to(storage_device, non_blocking=True) |
|
maskmem_pos_enc = self._get_maskmem_pos_enc(inference_state, {'maskmem_pos_enc': maskmem_pos_enc}) |
|
return (maskmem_features, maskmem_pos_enc) |
|
|
|
def _get_maskmem_pos_enc(self, inference_state, current_out): |
|
model_constants = inference_state['constants'] |
|
out_maskmem_pos_enc = current_out['maskmem_pos_enc'] |
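
# 'maskmem_pos_enc' is identical across frames and objects, so a single slice is
# cached in the session constants and re-expanded on demand to keep the state small.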
|
if out_maskmem_pos_enc is not None: |
|
if 'maskmem_pos_enc' not in model_constants: |
|
assert isinstance(out_maskmem_pos_enc, list) |
|
maskmem_pos_enc = [x[0:1].clone() for x in out_maskmem_pos_enc] |
|
model_constants['maskmem_pos_enc'] = maskmem_pos_enc |
|
else: |
|
maskmem_pos_enc = model_constants['maskmem_pos_enc'] |
|
batch_size = out_maskmem_pos_enc[0].size(0) |
|
expanded_maskmem_pos_enc = [x.expand(batch_size, -1, -1, -1) for x in maskmem_pos_enc] |
|
else: |
|
expanded_maskmem_pos_enc = None |
|
return expanded_maskmem_pos_enc |
|
|
|
def _clear_non_cond_mem_around_input(self, inference_state, frame_idx): |
|
r = self.memory_temporal_stride_for_eval |
|
frame_idx_begin = frame_idx - r * self.num_maskmem |
|
frame_idx_end = frame_idx + r * self.num_maskmem |
|
output_dict = inference_state['output_dict'] |
|
non_cond_frame_outputs = output_dict['non_cond_frame_outputs'] |
|
for t in range(frame_idx_begin, frame_idx_end + 1): |
|
non_cond_frame_outputs.pop(t, None) |
|
for obj_output_dict in inference_state['output_dict_per_obj'].values(): |
|
obj_output_dict['non_cond_frame_outputs'].pop(t, None) |
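
# A minimal usage sketch for SAM2VideoPredictor, assuming the from_pretrained entry
# point above and a directory of JPEG frames; the model id, paths, and prompt
# coordinates are placeholders.
if __name__ == "__main__":
    import numpy as np

    predictor = SAM2VideoPredictor.from_pretrained("facebook/sam2-hiera-small")
    state = predictor.init_state(video_path="path/to/video_frames_dir")

    # Prompt object 1 with a single foreground point on frame 0.
    frame_idx, obj_ids, video_res_masks = predictor.add_new_points_or_box(
        inference_state=state,
        frame_idx=0,
        obj_id=1,
        points=np.array([[210.0, 350.0]], dtype=np.float32),
        labels=np.array([1], dtype=np.int32),
    )

    # Propagate the prompt through the rest of the video and threshold the logits
    # at 0.0 to obtain binary masks per object.
    video_segments = {}
    for frame_idx, obj_ids, video_res_masks in predictor.propagate_in_video(state):
        video_segments[frame_idx] = {
            obj_id: (video_res_masks[i] > 0.0).cpu().numpy()
            for i, obj_id in enumerate(obj_ids)
        }

    predictor.reset_state(state)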
|
|
|
# File: segment-anything-2-coreml-conversion/sav_dataset/sav_evaluator.py |
|
from argparse import ArgumentParser |
|
from utils.sav_benchmark import benchmark |
|
parser = ArgumentParser() |
|
parser.add_argument('--gt_root', required=True, help="Path to the GT folder. For SA-V, it's sav_val/Annotations_6fps or sav_test/Annotations_6fps") |
|
parser.add_argument('--pred_root', required=True, help='Path to a folder containing folders of masks to be evaluated, with exactly the same structure as gt_root') |
|
parser.add_argument('-n', '--num_processes', default=16, type=int, help='Number of concurrent processes') |
|
parser.add_argument('-s', '--strict', help='Make sure every video in the gt_root folder has a corresponding video in the prediction', action='store_true') |
|
parser.add_argument('-q', '--quiet', help='Run the evaluation quietly without printing progress information', action='store_true')
|
parser.add_argument('--do_not_skip_first_and_last_frame', help="In SA-V val and test, we skip the first and the last annotated frames in evaluation. Set this flag to evaluate on settings that don't skip the first and last frames", action='store_true')
|
if __name__ == '__main__': |
|
args = parser.parse_args() |
|
benchmark([args.gt_root], [args.pred_root], args.strict, args.num_processes, verbose=not args.quiet, skip_first_and_last=not args.do_not_skip_first_and_last_frame) |
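
# Example invocation (flag values and paths are illustrative):
#   python sav_dataset/sav_evaluator.py \
#       --gt_root sav_val/Annotations_6fps \
#       --pred_root outputs/sav_val_predictions \
#       -n 16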
|
|
|
# File: segment-anything-2-coreml-conversion/tools/vos_inference.py |
|
import argparse |
|
import os |
|
import numpy as np |
|
import torch |
|
from PIL import Image |
|
from sam2.build_sam import build_sam2_video_predictor |
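
# Standard DAVIS color palette (raw RGB triplets) used when saving indexed PNG masks.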
|
DAVIS_PALETTE = b'\x00\x00\x00\x80\x00\x00\x00\x80\x00\x80\x80\x00\x00\x00\x80\x80\x00\x80\x00\x80\x80\x80\x80\x80@\x00\x00\xc0\x00\x00@\x80\x00\xc0\x80\x00@\x00\x80\xc0\x00\x80@\x80\x80\xc0\x80\x80\x00@\x00\x80@\x00\x00\xc0\x00\x80\xc0\x00\x00@\x80\x80@\x80\x00\xc0\x80\x80\xc0\x80@@\x00\xc0@\x00@\xc0\x00\xc0\xc0\x00@@\x80\xc0@\x80@\xc0\x80\xc0\xc0\x80\x00\x00@\x80\x00@\x00\x80@\x80\x80@\x00\x00\xc0\x80\x00\xc0\x00\x80\xc0\x80\x80\xc0@\x00@\xc0\x00@@\x80@\xc0\x80@@\x00\xc0\xc0\x00\xc0@\x80\xc0\xc0\x80\xc0\x00@@\x80@@\x00\xc0@\x80\xc0@\x00@\xc0\x80@\xc0\x00\xc0\xc0\x80\xc0\xc0@@@\xc0@@@\xc0@\xc0\xc0@@@\xc0\xc0@\xc0@\xc0\xc0\xc0\xc0\xc0 \x00\x00\xa0\x00\x00 \x80\x00\xa0\x80\x00 \x00\x80\xa0\x00\x80 \x80\x80\xa0\x80\x80`\x00\x00\xe0\x00\x00`\x80\x00\xe0\x80\x00`\x00\x80\xe0\x00\x80`\x80\x80\xe0\x80\x80 @\x00\xa0@\x00 \xc0\x00\xa0\xc0\x00 @\x80\xa0@\x80 \xc0\x80\xa0\xc0\x80`@\x00\xe0@\x00`\xc0\x00\xe0\xc0\x00`@\x80\xe0@\x80`\xc0\x80\xe0\xc0\x80 \x00@\xa0\x00@ \x80@\xa0\x80@ \x00\xc0\xa0\x00\xc0 \x80\xc0\xa0\x80\xc0`\x00@\xe0\x00@`\x80@\xe0\x80@`\x00\xc0\xe0\x00\xc0`\x80\xc0\xe0\x80\xc0 @@\xa0@@ \xc0@\xa0\xc0@ @\xc0\xa0@\xc0 \xc0\xc0\xa0\xc0\xc0`@@\xe0@@`\xc0@\xe0\xc0@`@\xc0\xe0@\xc0`\xc0\xc0\xe0\xc0\xc0\x00 \x00\x80 \x00\x00\xa0\x00\x80\xa0\x00\x00 \x80\x80 \x80\x00\xa0\x80\x80\xa0\x80@ \x00\xc0 \x00@\xa0\x00\xc0\xa0\x00@ \x80\xc0 \x80@\xa0\x80\xc0\xa0\x80\x00`\x00\x80`\x00\x00\xe0\x00\x80\xe0\x00\x00`\x80\x80`\x80\x00\xe0\x80\x80\xe0\x80@`\x00\xc0`\x00@\xe0\x00\xc0\xe0\x00@`\x80\xc0`\x80@\xe0\x80\xc0\xe0\x80\x00 @\x80 @\x00\xa0@\x80\xa0@\x00 \xc0\x80 \xc0\x00\xa0\xc0\x80\xa0\xc0@ @\xc0 @@\xa0@\xc0\xa0@@ \xc0\xc0 \xc0@\xa0\xc0\xc0\xa0\xc0\x00`@\x80`@\x00\xe0@\x80\xe0@\x00`\xc0\x80`\xc0\x00\xe0\xc0\x80\xe0\xc0@`@\xc0`@@\xe0@\xc0\xe0@@`\xc0\xc0`\xc0@\xe0\xc0\xc0\xe0\xc0 \x00\xa0 \x00 \xa0\x00\xa0\xa0\x00 \x80\xa0 \x80 \xa0\x80\xa0\xa0\x80` \x00\xe0 \x00`\xa0\x00\xe0\xa0\x00` \x80\xe0 \x80`\xa0\x80\xe0\xa0\x80 `\x00\xa0`\x00 \xe0\x00\xa0\xe0\x00 `\x80\xa0`\x80 \xe0\x80\xa0\xe0\x80``\x00\xe0`\x00`\xe0\x00\xe0\xe0\x00``\x80\xe0`\x80`\xe0\x80\xe0\xe0\x80 @\xa0 @ \xa0@\xa0\xa0@ \xc0\xa0 \xc0 \xa0\xc0\xa0\xa0\xc0` @\xe0 @`\xa0@\xe0\xa0@` \xc0\xe0 \xc0`\xa0\xc0\xe0\xa0\xc0 `@\xa0`@ \xe0@\xa0\xe0@ `\xc0\xa0`\xc0 \xe0\xc0\xa0\xe0\xc0``@\xe0`@`\xe0@\xe0\xe0@``\xc0\xe0`\xc0`\xe0\xc0\xe0\xe0\xc0' |
|
|
|
def load_ann_png(path): |
|
mask = Image.open(path) |
|
palette = mask.getpalette() |
|
mask = np.array(mask).astype(np.uint8) |
|
return (mask, palette) |
|
|
|
def save_ann_png(path, mask, palette): |
|
assert mask.dtype == np.uint8 |
|
assert mask.ndim == 2 |
|
output_mask = Image.fromarray(mask) |
|
output_mask.putpalette(palette) |
|
output_mask.save(path) |
|
|
|
def get_per_obj_mask(mask): |
|
object_ids = np.unique(mask) |
|
object_ids = object_ids[object_ids > 0].tolist() |
|
per_obj_mask = {object_id: mask == object_id for object_id in object_ids} |
|
return per_obj_mask |
|
|
|
def put_per_obj_mask(per_obj_mask, height, width): |
|
mask = np.zeros((height, width), dtype=np.uint8) |
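
# Paint objects in descending id order so that, where masks overlap, the smallest
# object id ends up on top.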
|
object_ids = sorted(per_obj_mask)[::-1] |
|
for object_id in object_ids: |
|
object_mask = per_obj_mask[object_id] |
|
object_mask = object_mask.reshape(height, width) |
|
mask[object_mask] = object_id |
|
return mask |
|
|
|
def load_masks_from_dir(input_mask_dir, video_name, frame_name, per_obj_png_file): |
|
if not per_obj_png_file: |
|
input_mask_path = os.path.join(input_mask_dir, video_name, f'{frame_name}.png') |
|
(input_mask, input_palette) = load_ann_png(input_mask_path) |
|
per_obj_input_mask = get_per_obj_mask(input_mask) |
|
else: |
|
per_obj_input_mask = {} |
|
for object_name in os.listdir(os.path.join(input_mask_dir, video_name)): |
|
object_id = int(object_name) |
|
input_mask_path = os.path.join(input_mask_dir, video_name, object_name, f'{frame_name}.png') |
|
(input_mask, input_palette) = load_ann_png(input_mask_path) |
|
per_obj_input_mask[object_id] = input_mask > 0 |
|
return (per_obj_input_mask, input_palette) |
|
|
|
def save_masks_to_dir(output_mask_dir, video_name, frame_name, per_obj_output_mask, height, width, per_obj_png_file, output_palette): |
|
os.makedirs(os.path.join(output_mask_dir, video_name), exist_ok=True) |
|
if not per_obj_png_file: |
|
output_mask = put_per_obj_mask(per_obj_output_mask, height, width) |
|
output_mask_path = os.path.join(output_mask_dir, video_name, f'{frame_name}.png') |
|
save_ann_png(output_mask_path, output_mask, output_palette) |
|
else: |
|
for (object_id, object_mask) in per_obj_output_mask.items(): |
|
object_name = f'{object_id:03d}' |
|
os.makedirs(os.path.join(output_mask_dir, video_name, object_name), exist_ok=True) |
|
output_mask = object_mask.reshape(height, width).astype(np.uint8) |
|
output_mask_path = os.path.join(output_mask_dir, video_name, object_name, f'{frame_name}.png') |
|
save_ann_png(output_mask_path, output_mask, output_palette) |
|
|
|
@torch.inference_mode() |
|
@torch.autocast(device_type='cuda', dtype=torch.bfloat16) |
|
def vos_inference(predictor, base_video_dir, input_mask_dir, output_mask_dir, video_name, score_thresh=0.0, use_all_masks=False, per_obj_png_file=False): |
|
video_dir = os.path.join(base_video_dir, video_name) |
|
frame_names = [os.path.splitext(p)[0] for p in os.listdir(video_dir) if os.path.splitext(p)[-1] in ['.jpg', '.jpeg', '.JPG', '.JPEG']] |
|
frame_names.sort(key=lambda p: int(os.path.splitext(p)[0])) |
|
inference_state = predictor.init_state(video_path=video_dir, async_loading_frames=False) |
|
height = inference_state['video_height'] |
|
width = inference_state['video_width'] |
|
input_palette = None |
|
if not use_all_masks: |
|
input_frame_inds = [0] |
|
else: |
|
if not per_obj_png_file: |
|
input_frame_inds = [idx for (idx, name) in enumerate(frame_names) if os.path.exists(os.path.join(input_mask_dir, video_name, f'{name}.png'))] |
|
else: |
|
input_frame_inds = [idx for object_name in os.listdir(os.path.join(input_mask_dir, video_name)) for (idx, name) in enumerate(frame_names) if os.path.exists(os.path.join(input_mask_dir, video_name, object_name, f'{name}.png'))] |
|
input_frame_inds = sorted(set(input_frame_inds)) |
|
for input_frame_idx in input_frame_inds: |
|
(per_obj_input_mask, input_palette) = load_masks_from_dir(input_mask_dir=input_mask_dir, video_name=video_name, frame_name=frame_names[input_frame_idx], per_obj_png_file=per_obj_png_file) |
|
for (object_id, object_mask) in per_obj_input_mask.items(): |
|
predictor.add_new_mask(inference_state=inference_state, frame_idx=input_frame_idx, obj_id=object_id, mask=object_mask) |
|
os.makedirs(os.path.join(output_mask_dir, video_name), exist_ok=True) |
|
output_palette = input_palette or DAVIS_PALETTE |
|
video_segments = {} |
|
for (out_frame_idx, out_obj_ids, out_mask_logits) in predictor.propagate_in_video(inference_state): |
|
per_obj_output_mask = {out_obj_id: (out_mask_logits[i] > score_thresh).cpu().numpy() for (i, out_obj_id) in enumerate(out_obj_ids)} |
|
video_segments[out_frame_idx] = per_obj_output_mask |
|
for (out_frame_idx, per_obj_output_mask) in video_segments.items(): |
|
save_masks_to_dir(output_mask_dir=output_mask_dir, video_name=video_name, frame_name=frame_names[out_frame_idx], per_obj_output_mask=per_obj_output_mask, height=height, width=width, per_obj_png_file=per_obj_png_file, output_palette=output_palette) |
|
|
|
def main(): |
|
parser = argparse.ArgumentParser() |
|
parser.add_argument('--sam2_cfg', type=str, default='sam2_hiera_b+.yaml', help='SAM 2 model configuration file') |
|
parser.add_argument('--sam2_checkpoint', type=str, default='./checkpoints/sam2_hiera_b+.pt', help='path to the SAM 2 model checkpoint') |
|
parser.add_argument('--base_video_dir', type=str, required=True, help='directory containing videos (as JPEG files) to run VOS prediction on') |
|
parser.add_argument('--input_mask_dir', type=str, required=True, help='directory containing input masks (as PNG files) of each video') |
|
parser.add_argument('--video_list_file', type=str, default=None, help='text file containing the list of video names to run VOS prediction on') |
|
parser.add_argument('--output_mask_dir', type=str, required=True, help='directory to save the output masks (as PNG files)') |
|
parser.add_argument('--score_thresh', type=float, default=0.0, help='threshold for the output mask logits (default: 0.0)') |
|
parser.add_argument('--use_all_masks', action='store_true', help="whether to use all available PNG files in input_mask_dir (default without this flag: just the first PNG file as input to the SAM 2 model; this flag is usually unnecessary, since semi-supervised VOS evaluation takes input from the first frame only)")
|
parser.add_argument('--per_obj_png_file', action='store_true', help='whether to use separate per-object PNG files for input and output masks (default without this flag: all object masks are packed into a single PNG file on each frame following DAVIS format; note that the SA-V dataset stores each object mask as an individual PNG file and requires this flag)')
|
parser.add_argument('--apply_postprocessing', action='store_true', help="whether to apply postprocessing (e.g. hole-filling) to the output masks (we don't apply such postprocessing in the SAM 2 model evaluation)")
|
args = parser.parse_args() |
|
hydra_overrides_extra = ['++model.non_overlap_masks=' + ('false' if args.per_obj_png_file else 'true')] |
|
predictor = build_sam2_video_predictor(config_file=args.sam2_cfg, ckpt_path=args.sam2_checkpoint, apply_postprocessing=args.apply_postprocessing, hydra_overrides_extra=hydra_overrides_extra) |
|
if args.use_all_masks: |
|
print('using all available masks in input_mask_dir as input to the SAM 2 model') |
|
else: |
|
print("using only the first frame's mask in input_mask_dir as input to the SAM 2 model") |
|
if args.video_list_file is not None: |
|
with open(args.video_list_file, 'r') as f: |
|
video_names = [v.strip() for v in f.readlines()] |
|
else: |
|
video_names = [p for p in os.listdir(args.base_video_dir) if os.path.isdir(os.path.join(args.base_video_dir, p))] |
|
print(f'running VOS prediction on {len(video_names)} videos:\n{video_names}') |
|
for (n_video, video_name) in enumerate(video_names): |
|
print(f'\n{n_video + 1}/{len(video_names)} - running on {video_name}') |
|
vos_inference(predictor=predictor, base_video_dir=args.base_video_dir, input_mask_dir=args.input_mask_dir, output_mask_dir=args.output_mask_dir, video_name=video_name, score_thresh=args.score_thresh, use_all_masks=args.use_all_masks, per_obj_png_file=args.per_obj_png_file) |
|
print(f'completed VOS prediction on {len(video_names)} videos -- output masks saved to {args.output_mask_dir}') |
|
if __name__ == '__main__': |
|
main() |
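
# Example invocation (flag values and paths are illustrative):
#   python tools/vos_inference.py \
#       --sam2_cfg sam2_hiera_b+.yaml \
#       --sam2_checkpoint ./checkpoints/sam2_hiera_b+.pt \
#       --base_video_dir data/JPEGImages \
#       --input_mask_dir data/Annotations \
#       --output_mask_dir outputs/masks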
|
|
|
|