# File: controlnet_aux-master/src/controlnet_aux/__init__.py __version__ = '0.0.9' from .anyline import AnylineDetector from .canny import CannyDetector from .dwpose import DWposeDetector from .hed import HEDdetector from .leres import LeresDetector from .lineart import LineartDetector from .lineart_anime import LineartAnimeDetector from .lineart_standard import LineartStandardDetector from .mediapipe_face import MediapipeFaceDetector from .midas import MidasDetector from .mlsd import MLSDdetector from .normalbae import NormalBaeDetector from .open_pose import OpenposeDetector from .pidi import PidiNetDetector from .segment_anything import SamDetector from .shuffle import ContentShuffleDetector from .teed import TEEDdetector from .zoe import ZoeDetector # File: controlnet_aux-master/src/controlnet_aux/anyline/__init__.py import os import cv2 import numpy as np import torch from einops import rearrange from huggingface_hub import hf_hub_download from PIL import Image from skimage import morphology from ..teed.ted import TED from ..util import HWC3, resize_image, safe_step class AnylineDetector: def __init__(self, model): self.model = model @classmethod def from_pretrained(cls, pretrained_model_or_path, filename=None, subfolder=None): if os.path.isdir(pretrained_model_or_path): model_path = os.path.join(pretrained_model_or_path, filename) else: model_path = hf_hub_download(pretrained_model_or_path, filename, subfolder=subfolder) model = TED() model.load_state_dict(torch.load(model_path, map_location='cpu')) return cls(model) def to(self, device): self.model.to(device) return self def __call__(self, input_image, detect_resolution=1280, guassian_sigma=2.0, intensity_threshold=3, output_type='pil'): device = next(iter(self.model.parameters())).device if not isinstance(input_image, np.ndarray): input_image = np.array(input_image, dtype=np.uint8) output_type = output_type or 'pil' else: output_type = output_type or 'np' (original_height, original_width, _) = input_image.shape input_image = HWC3(input_image) input_image = resize_image(input_image, detect_resolution) assert input_image.ndim == 3 (height, width, _) = input_image.shape with torch.no_grad(): image_teed = torch.from_numpy(input_image.copy()).float().to(device) image_teed = rearrange(image_teed, 'h w c -> 1 c h w') edges = self.model(image_teed) edges = [e.detach().cpu().numpy().astype(np.float32)[0, 0] for e in edges] edges = [cv2.resize(e, (width, height), interpolation=cv2.INTER_LINEAR) for e in edges] edges = np.stack(edges, axis=2) edge = 1 / (1 + np.exp(-np.mean(edges, axis=2).astype(np.float64))) edge = safe_step(edge, 2) edge = (edge * 255.0).clip(0, 255).astype(np.uint8) mteed_result = edge mteed_result = HWC3(mteed_result) x = input_image.astype(np.float32) g = cv2.GaussianBlur(x, (0, 0), guassian_sigma) intensity = np.min(g - x, axis=2).clip(0, 255) intensity /= max(16, np.median(intensity[intensity > intensity_threshold])) intensity *= 127 lineart_result = intensity.clip(0, 255).astype(np.uint8) lineart_result = HWC3(lineart_result) lineart_result = self.get_intensity_mask(lineart_result, lower_bound=0, upper_bound=255) cleaned = morphology.remove_small_objects(lineart_result.astype(bool), min_size=36, connectivity=1) lineart_result = lineart_result * cleaned final_result = self.combine_layers(mteed_result, lineart_result) final_result = cv2.resize(final_result, (original_width, original_height), interpolation=cv2.INTER_LINEAR) if output_type == 'pil': final_result = Image.fromarray(final_result) return final_result def get_intensity_mask(self, image_array, lower_bound, upper_bound): mask = image_array[:, :, 0] mask = np.where((mask >= lower_bound) & (mask <= upper_bound), mask, 0) mask = np.expand_dims(mask, 2).repeat(3, axis=2) return mask def combine_layers(self, base_layer, top_layer): mask = top_layer.astype(bool) temp = 1 - (1 - top_layer) * (1 - base_layer) result = base_layer * ~mask + temp * mask return result # File: controlnet_aux-master/src/controlnet_aux/canny/__init__.py import warnings import cv2 import numpy as np from PIL import Image from ..util import HWC3, resize_image class CannyDetector: def __call__(self, input_image=None, low_threshold=100, high_threshold=200, detect_resolution=512, image_resolution=512, output_type=None, **kwargs): if 'img' in kwargs: warnings.warn('img is deprecated, please use `input_image=...` instead.', DeprecationWarning) input_image = kwargs.pop('img') if input_image is None: raise ValueError('input_image must be defined.') if not isinstance(input_image, np.ndarray): input_image = np.array(input_image, dtype=np.uint8) output_type = output_type or 'pil' else: output_type = output_type or 'np' input_image = HWC3(input_image) input_image = resize_image(input_image, detect_resolution) detected_map = cv2.Canny(input_image, low_threshold, high_threshold) detected_map = HWC3(detected_map) img = resize_image(input_image, image_resolution) (H, W, C) = img.shape detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR) if output_type == 'pil': detected_map = Image.fromarray(detected_map) return detected_map # File: controlnet_aux-master/src/controlnet_aux/dwpose/__init__.py import os os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE' import cv2 import torch import numpy as np from PIL import Image from ..util import HWC3, resize_image from . import util def draw_pose(pose, H, W): bodies = pose['bodies'] faces = pose['faces'] hands = pose['hands'] candidate = bodies['candidate'] subset = bodies['subset'] canvas = np.zeros(shape=(H, W, 3), dtype=np.uint8) canvas = util.draw_bodypose(canvas, candidate, subset) canvas = util.draw_handpose(canvas, hands) canvas = util.draw_facepose(canvas, faces) return canvas class DWposeDetector: def __init__(self, det_config=None, det_ckpt=None, pose_config=None, pose_ckpt=None, device='cpu'): from .wholebody import Wholebody self.pose_estimation = Wholebody(det_config, det_ckpt, pose_config, pose_ckpt, device) def to(self, device): self.pose_estimation.to(device) return self def __call__(self, input_image, detect_resolution=512, image_resolution=512, output_type='pil', **kwargs): input_image = cv2.cvtColor(np.array(input_image, dtype=np.uint8), cv2.COLOR_RGB2BGR) input_image = HWC3(input_image) input_image = resize_image(input_image, detect_resolution) (H, W, C) = input_image.shape with torch.no_grad(): (candidate, subset) = self.pose_estimation(input_image) (nums, keys, locs) = candidate.shape candidate[..., 0] /= float(W) candidate[..., 1] /= float(H) body = candidate[:, :18].copy() body = body.reshape(nums * 18, locs) score = subset[:, :18] for i in range(len(score)): for j in range(len(score[i])): if score[i][j] > 0.3: score[i][j] = int(18 * i + j) else: score[i][j] = -1 un_visible = subset < 0.3 candidate[un_visible] = -1 foot = candidate[:, 18:24] faces = candidate[:, 24:92] hands = candidate[:, 92:113] hands = np.vstack([hands, candidate[:, 113:]]) bodies = dict(candidate=body, subset=score) pose = dict(bodies=bodies, hands=hands, faces=faces) detected_map = draw_pose(pose, H, W) detected_map = HWC3(detected_map) img = resize_image(input_image, image_resolution) (H, W, C) = img.shape detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR) if output_type == 'pil': detected_map = Image.fromarray(detected_map) return detected_map # File: controlnet_aux-master/src/controlnet_aux/dwpose/dwpose_config/dwpose-l_384x288.py max_epochs = 270 stage2_num_epochs = 30 base_lr = 0.004 train_cfg = dict(max_epochs=max_epochs, val_interval=10) randomness = dict(seed=21) optim_wrapper = dict(type='OptimWrapper', optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05), paramwise_cfg=dict(norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) param_scheduler = [dict(type='LinearLR', start_factor=1e-05, by_epoch=False, begin=0, end=1000), dict(type='CosineAnnealingLR', eta_min=base_lr * 0.05, begin=max_epochs // 2, end=max_epochs, T_max=max_epochs // 2, by_epoch=True, convert_to_iter_based=True)] auto_scale_lr = dict(base_batch_size=512) codec = dict(type='SimCCLabel', input_size=(288, 384), sigma=(6.0, 6.93), simcc_split_ratio=2.0, normalize=False, use_dark=False) model = dict(type='TopdownPoseEstimator', data_preprocessor=dict(type='PoseDataPreprocessor', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], bgr_to_rgb=True), backbone=dict(_scope_='mmdet', type='CSPNeXt', arch='P5', expand_ratio=0.5, deepen_factor=1.0, widen_factor=1.0, out_indices=(4,), channel_attention=True, norm_cfg=dict(type='SyncBN'), act_cfg=dict(type='SiLU'), init_cfg=dict(type='Pretrained', prefix='backbone.', checkpoint='https://download.openmmlab.com/mmpose/v1/projects/rtmpose/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth')), head=dict(type='RTMCCHead', in_channels=1024, out_channels=133, input_size=codec['input_size'], in_featuremap_size=(9, 12), simcc_split_ratio=codec['simcc_split_ratio'], final_layer_kernel_size=7, gau_cfg=dict(hidden_dims=256, s=128, expansion_factor=2, dropout_rate=0.0, drop_path=0.0, act_fn='SiLU', use_rel_bias=False, pos_enc=False), loss=dict(type='KLDiscretLoss', use_target_weight=True, beta=10.0, label_softmax=True), decoder=codec), test_cfg=dict(flip_test=True)) dataset_type = 'CocoWholeBodyDataset' data_mode = 'topdown' data_root = '/data/' backend_args = dict(backend='local') train_pipeline = [dict(type='LoadImage', backend_args=backend_args), dict(type='GetBBoxCenterScale'), dict(type='RandomFlip', direction='horizontal'), dict(type='RandomHalfBody'), dict(type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80), dict(type='TopdownAffine', input_size=codec['input_size']), dict(type='mmdet.YOLOXHSVRandomAug'), dict(type='Albumentation', transforms=[dict(type='Blur', p=0.1), dict(type='MedianBlur', p=0.1), dict(type='CoarseDropout', max_holes=1, max_height=0.4, max_width=0.4, min_holes=1, min_height=0.2, min_width=0.2, p=1.0)]), dict(type='GenerateTarget', encoder=codec), dict(type='PackPoseInputs')] val_pipeline = [dict(type='LoadImage', backend_args=backend_args), dict(type='GetBBoxCenterScale'), dict(type='TopdownAffine', input_size=codec['input_size']), dict(type='PackPoseInputs')] train_pipeline_stage2 = [dict(type='LoadImage', backend_args=backend_args), dict(type='GetBBoxCenterScale'), dict(type='RandomFlip', direction='horizontal'), dict(type='RandomHalfBody'), dict(type='RandomBBoxTransform', shift_factor=0.0, scale_factor=[0.75, 1.25], rotate_factor=60), dict(type='TopdownAffine', input_size=codec['input_size']), dict(type='mmdet.YOLOXHSVRandomAug'), dict(type='Albumentation', transforms=[dict(type='Blur', p=0.1), dict(type='MedianBlur', p=0.1), dict(type='CoarseDropout', max_holes=1, max_height=0.4, max_width=0.4, min_holes=1, min_height=0.2, min_width=0.2, p=0.5)]), dict(type='GenerateTarget', encoder=codec), dict(type='PackPoseInputs')] datasets = [] dataset_coco = dict(type=dataset_type, data_root=data_root, data_mode=data_mode, ann_file='coco/annotations/coco_wholebody_train_v1.0.json', data_prefix=dict(img='coco/train2017/'), pipeline=[]) datasets.append(dataset_coco) scene = ['Magic_show', 'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow', 'Speech', 'Fitness', 'Interview', 'Olympic', 'TVShow', 'Singing', 'SignLanguage', 'Movie', 'LiveVlog', 'VideoConference'] for i in range(len(scene)): datasets.append(dict(type=dataset_type, data_root=data_root, data_mode=data_mode, ann_file='UBody/annotations/' + scene[i] + '/keypoint_annotation.json', data_prefix=dict(img='UBody/images/' + scene[i] + '/'), pipeline=[])) train_dataloader = dict(batch_size=32, num_workers=10, persistent_workers=True, sampler=dict(type='DefaultSampler', shuffle=True), dataset=dict(type='CombinedDataset', metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'), datasets=datasets, pipeline=train_pipeline, test_mode=False)) val_dataloader = dict(batch_size=32, num_workers=10, persistent_workers=True, drop_last=False, sampler=dict(type='DefaultSampler', shuffle=False, round_up=False), dataset=dict(type=dataset_type, data_root=data_root, data_mode=data_mode, ann_file='coco/annotations/coco_wholebody_val_v1.0.json', bbox_file=f'{data_root}coco/person_detection_results/COCO_val2017_detections_AP_H_56_person.json', data_prefix=dict(img='coco/val2017/'), test_mode=True, pipeline=val_pipeline)) test_dataloader = val_dataloader default_hooks = dict(checkpoint=dict(save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1)) custom_hooks = [dict(type='EMAHook', ema_type='ExpMomentumEMA', momentum=0.0002, update_buffers=True, priority=49), dict(type='mmdet.PipelineSwitchHook', switch_epoch=max_epochs - stage2_num_epochs, switch_pipeline=train_pipeline_stage2)] val_evaluator = dict(type='CocoWholeBodyMetric', ann_file=data_root + 'coco/annotations/coco_wholebody_val_v1.0.json') test_evaluator = val_evaluator # File: controlnet_aux-master/src/controlnet_aux/dwpose/util.py import math import numpy as np import cv2 eps = 0.01 def smart_resize(x, s): (Ht, Wt) = s if x.ndim == 2: (Ho, Wo) = x.shape Co = 1 else: (Ho, Wo, Co) = x.shape if Co == 3 or Co == 1: k = float(Ht + Wt) / float(Ho + Wo) return cv2.resize(x, (int(Wt), int(Ht)), interpolation=cv2.INTER_AREA if k < 1 else cv2.INTER_LANCZOS4) else: return np.stack([smart_resize(x[:, :, i], s) for i in range(Co)], axis=2) def smart_resize_k(x, fx, fy): if x.ndim == 2: (Ho, Wo) = x.shape Co = 1 else: (Ho, Wo, Co) = x.shape (Ht, Wt) = (Ho * fy, Wo * fx) if Co == 3 or Co == 1: k = float(Ht + Wt) / float(Ho + Wo) return cv2.resize(x, (int(Wt), int(Ht)), interpolation=cv2.INTER_AREA if k < 1 else cv2.INTER_LANCZOS4) else: return np.stack([smart_resize_k(x[:, :, i], fx, fy) for i in range(Co)], axis=2) def padRightDownCorner(img, stride, padValue): h = img.shape[0] w = img.shape[1] pad = 4 * [None] pad[0] = 0 pad[1] = 0 pad[2] = 0 if h % stride == 0 else stride - h % stride pad[3] = 0 if w % stride == 0 else stride - w % stride img_padded = img pad_up = np.tile(img_padded[0:1, :, :] * 0 + padValue, (pad[0], 1, 1)) img_padded = np.concatenate((pad_up, img_padded), axis=0) pad_left = np.tile(img_padded[:, 0:1, :] * 0 + padValue, (1, pad[1], 1)) img_padded = np.concatenate((pad_left, img_padded), axis=1) pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + padValue, (pad[2], 1, 1)) img_padded = np.concatenate((img_padded, pad_down), axis=0) pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + padValue, (1, pad[3], 1)) img_padded = np.concatenate((img_padded, pad_right), axis=1) return (img_padded, pad) def transfer(model, model_weights): transfered_model_weights = {} for weights_name in model.state_dict().keys(): transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])] return transfered_model_weights def draw_bodypose(canvas, candidate, subset): (H, W, C) = canvas.shape candidate = np.array(candidate) subset = np.array(subset) stickwidth = 4 limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], [1, 16], [16, 18], [3, 17], [6, 18]] colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]] for i in range(17): for n in range(len(subset)): index = subset[n][np.array(limbSeq[i]) - 1] if -1 in index: continue Y = candidate[index.astype(int), 0] * float(W) X = candidate[index.astype(int), 1] * float(H) mX = np.mean(X) mY = np.mean(Y) length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1) cv2.fillConvexPoly(canvas, polygon, colors[i]) canvas = (canvas * 0.6).astype(np.uint8) for i in range(18): for n in range(len(subset)): index = int(subset[n][i]) if index == -1: continue (x, y) = candidate[index][0:2] x = int(x * W) y = int(y * H) cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1) return canvas def draw_handpose(canvas, all_hand_peaks): import matplotlib (H, W, C) = canvas.shape edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]] for i in range(len(all_hand_peaks)): peaks = all_hand_peaks[i] peaks = np.array(peaks) for (ie, e) in enumerate(edges): (x1, y1) = peaks[e[0]] (x2, y2) = peaks[e[1]] x1 = int(x1 * W) y1 = int(y1 * H) x2 = int(x2 * W) y2 = int(y2 * H) if x1 > eps and y1 > eps and (x2 > eps) and (y2 > eps): cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * 255, thickness=2) for (_, keyponit) in enumerate(peaks): (x, y) = keyponit x = int(x * W) y = int(y * H) if x > eps and y > eps: cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1) return canvas def draw_facepose(canvas, all_lmks): (H, W, C) = canvas.shape for lmks in all_lmks: lmks = np.array(lmks) for lmk in lmks: (x, y) = lmk x = int(x * W) y = int(y * H) if x > eps and y > eps: cv2.circle(canvas, (x, y), 3, (255, 255, 255), thickness=-1) return canvas def handDetect(candidate, subset, oriImg): ratioWristElbow = 0.33 detect_result = [] (image_height, image_width) = oriImg.shape[0:2] for person in subset.astype(int): has_left = np.sum(person[[5, 6, 7]] == -1) == 0 has_right = np.sum(person[[2, 3, 4]] == -1) == 0 if not (has_left or has_right): continue hands = [] if has_left: (left_shoulder_index, left_elbow_index, left_wrist_index) = person[[5, 6, 7]] (x1, y1) = candidate[left_shoulder_index][:2] (x2, y2) = candidate[left_elbow_index][:2] (x3, y3) = candidate[left_wrist_index][:2] hands.append([x1, y1, x2, y2, x3, y3, True]) if has_right: (right_shoulder_index, right_elbow_index, right_wrist_index) = person[[2, 3, 4]] (x1, y1) = candidate[right_shoulder_index][:2] (x2, y2) = candidate[right_elbow_index][:2] (x3, y3) = candidate[right_wrist_index][:2] hands.append([x1, y1, x2, y2, x3, y3, False]) for (x1, y1, x2, y2, x3, y3, is_left) in hands: x = x3 + ratioWristElbow * (x3 - x2) y = y3 + ratioWristElbow * (y3 - y2) distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2) distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2) width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder) x -= width / 2 y -= width / 2 if x < 0: x = 0 if y < 0: y = 0 width1 = width width2 = width if x + width > image_width: width1 = image_width - x if y + width > image_height: width2 = image_height - y width = min(width1, width2) if width >= 20: detect_result.append([int(x), int(y), int(width), is_left]) '' return detect_result def faceDetect(candidate, subset, oriImg): detect_result = [] (image_height, image_width) = oriImg.shape[0:2] for person in subset.astype(int): has_head = person[0] > -1 if not has_head: continue has_left_eye = person[14] > -1 has_right_eye = person[15] > -1 has_left_ear = person[16] > -1 has_right_ear = person[17] > -1 if not (has_left_eye or has_right_eye or has_left_ear or has_right_ear): continue (head, left_eye, right_eye, left_ear, right_ear) = person[[0, 14, 15, 16, 17]] width = 0.0 (x0, y0) = candidate[head][:2] if has_left_eye: (x1, y1) = candidate[left_eye][:2] d = max(abs(x0 - x1), abs(y0 - y1)) width = max(width, d * 3.0) if has_right_eye: (x1, y1) = candidate[right_eye][:2] d = max(abs(x0 - x1), abs(y0 - y1)) width = max(width, d * 3.0) if has_left_ear: (x1, y1) = candidate[left_ear][:2] d = max(abs(x0 - x1), abs(y0 - y1)) width = max(width, d * 1.5) if has_right_ear: (x1, y1) = candidate[right_ear][:2] d = max(abs(x0 - x1), abs(y0 - y1)) width = max(width, d * 1.5) (x, y) = (x0, y0) x -= width y -= width if x < 0: x = 0 if y < 0: y = 0 width1 = width * 2 width2 = width * 2 if x + width > image_width: width1 = image_width - x if y + width > image_height: width2 = image_height - y width = min(width1, width2) if width >= 20: detect_result.append([int(x), int(y), int(width)]) return detect_result def npmax(array): arrayindex = array.argmax(1) arrayvalue = array.max(1) i = arrayvalue.argmax() j = arrayindex[i] return (i, j) # File: controlnet_aux-master/src/controlnet_aux/dwpose/wholebody.py import os import numpy as np import warnings try: import mmcv except ImportError: warnings.warn("The module 'mmcv' is not installed. The package will have limited functionality. Please install it using the command: mim install 'mmcv>=2.0.1'") try: from mmpose.apis import inference_topdown from mmpose.apis import init_model as init_pose_estimator from mmpose.evaluation.functional import nms from mmpose.utils import adapt_mmdet_pipeline from mmpose.structures import merge_data_samples except ImportError: warnings.warn("The module 'mmpose' is not installed. The package will have limited functionality. Please install it using the command: mim install 'mmpose>=1.1.0'") try: from mmdet.apis import inference_detector, init_detector except ImportError: warnings.warn("The module 'mmdet' is not installed. The package will have limited functionality. Please install it using the command: mim install 'mmdet>=3.1.0'") class Wholebody: def __init__(self, det_config=None, det_ckpt=None, pose_config=None, pose_ckpt=None, device='cpu'): if det_config is None: det_config = os.path.join(os.path.dirname(__file__), 'yolox_config/yolox_l_8xb8-300e_coco.py') if pose_config is None: pose_config = os.path.join(os.path.dirname(__file__), 'dwpose_config/dwpose-l_384x288.py') if det_ckpt is None: det_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth' if pose_ckpt is None: pose_ckpt = 'https://huggingface.co/wanghaofan/dw-ll_ucoco_384/resolve/main/dw-ll_ucoco_384.pth' self.detector = init_detector(det_config, det_ckpt, device=device) self.detector.cfg = adapt_mmdet_pipeline(self.detector.cfg) self.pose_estimator = init_pose_estimator(pose_config, pose_ckpt, device=device) def to(self, device): self.detector.to(device) self.pose_estimator.to(device) return self def __call__(self, oriImg): det_result = inference_detector(self.detector, oriImg) pred_instance = det_result.pred_instances.cpu().numpy() bboxes = np.concatenate((pred_instance.bboxes, pred_instance.scores[:, None]), axis=1) bboxes = bboxes[np.logical_and(pred_instance.labels == 0, pred_instance.scores > 0.5)] bboxes = bboxes[nms(bboxes, 0.7), :4] if len(bboxes) == 0: pose_results = inference_topdown(self.pose_estimator, oriImg) else: pose_results = inference_topdown(self.pose_estimator, oriImg, bboxes) preds = merge_data_samples(pose_results) preds = preds.pred_instances keypoints = preds.get('transformed_keypoints', preds.keypoints) if 'keypoint_scores' in preds: scores = preds.keypoint_scores else: scores = np.ones(keypoints.shape[:-1]) if 'keypoints_visible' in preds: visible = preds.keypoints_visible else: visible = np.ones(keypoints.shape[:-1]) keypoints_info = np.concatenate((keypoints, scores[..., None], visible[..., None]), axis=-1) neck = np.mean(keypoints_info[:, [5, 6]], axis=1) neck[:, 2:4] = np.logical_and(keypoints_info[:, 5, 2:4] > 0.3, keypoints_info[:, 6, 2:4] > 0.3).astype(int) new_keypoints_info = np.insert(keypoints_info, 17, neck, axis=1) mmpose_idx = [17, 6, 8, 10, 7, 9, 12, 14, 16, 13, 15, 2, 1, 4, 3] openpose_idx = [1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17] new_keypoints_info[:, openpose_idx] = new_keypoints_info[:, mmpose_idx] keypoints_info = new_keypoints_info (keypoints, scores, visible) = (keypoints_info[..., :2], keypoints_info[..., 2], keypoints_info[..., 3]) return (keypoints, scores) # File: controlnet_aux-master/src/controlnet_aux/dwpose/yolox_config/yolox_l_8xb8-300e_coco.py img_scale = (640, 640) model = dict(type='YOLOX', data_preprocessor=dict(type='DetDataPreprocessor', pad_size_divisor=32, batch_augments=[dict(type='BatchSyncRandomResize', random_size_range=(480, 800), size_divisor=32, interval=10)]), backbone=dict(type='CSPDarknet', deepen_factor=1.0, widen_factor=1.0, out_indices=(2, 3, 4), use_depthwise=False, spp_kernal_sizes=(5, 9, 13), norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), act_cfg=dict(type='Swish')), neck=dict(type='YOLOXPAFPN', in_channels=[256, 512, 1024], out_channels=256, num_csp_blocks=3, use_depthwise=False, upsample_cfg=dict(scale_factor=2, mode='nearest'), norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), act_cfg=dict(type='Swish')), bbox_head=dict(type='YOLOXHead', num_classes=80, in_channels=256, feat_channels=256, stacked_convs=2, strides=(8, 16, 32), use_depthwise=False, norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), act_cfg=dict(type='Swish'), loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=True, reduction='sum', loss_weight=1.0), loss_bbox=dict(type='IoULoss', mode='square', eps=1e-16, reduction='sum', loss_weight=5.0), loss_obj=dict(type='CrossEntropyLoss', use_sigmoid=True, reduction='sum', loss_weight=1.0), loss_l1=dict(type='L1Loss', reduction='sum', loss_weight=1.0)), train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)), test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65))) data_root = 'data/coco/' dataset_type = 'CocoDataset' backend_args = None train_pipeline = [dict(type='Mosaic', img_scale=img_scale, pad_val=114.0), dict(type='RandomAffine', scaling_ratio_range=(0.1, 2), border=(-img_scale[0] // 2, -img_scale[1] // 2)), dict(type='MixUp', img_scale=img_scale, ratio_range=(0.8, 1.6), pad_val=114.0), dict(type='YOLOXHSVRandomAug'), dict(type='RandomFlip', prob=0.5), dict(type='Resize', scale=img_scale, keep_ratio=True), dict(type='Pad', pad_to_square=True, pad_val=dict(img=(114.0, 114.0, 114.0))), dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), dict(type='PackDetInputs')] train_dataset = dict(type='MultiImageMixDataset', dataset=dict(type=dataset_type, data_root=data_root, ann_file='annotations/instances_train2017.json', data_prefix=dict(img='train2017/'), pipeline=[dict(type='LoadImageFromFile', backend_args=backend_args), dict(type='LoadAnnotations', with_bbox=True)], filter_cfg=dict(filter_empty_gt=False, min_size=32), backend_args=backend_args), pipeline=train_pipeline) test_pipeline = [dict(type='LoadImageFromFile', backend_args=backend_args), dict(type='Resize', scale=img_scale, keep_ratio=True), dict(type='Pad', pad_to_square=True, pad_val=dict(img=(114.0, 114.0, 114.0))), dict(type='LoadAnnotations', with_bbox=True), dict(type='PackDetInputs', meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor'))] train_dataloader = dict(batch_size=8, num_workers=4, persistent_workers=True, sampler=dict(type='DefaultSampler', shuffle=True), dataset=train_dataset) val_dataloader = dict(batch_size=8, num_workers=4, persistent_workers=True, drop_last=False, sampler=dict(type='DefaultSampler', shuffle=False), dataset=dict(type=dataset_type, data_root=data_root, ann_file='annotations/instances_val2017.json', data_prefix=dict(img='val2017/'), test_mode=True, pipeline=test_pipeline, backend_args=backend_args)) test_dataloader = val_dataloader val_evaluator = dict(type='CocoMetric', ann_file=data_root + 'annotations/instances_val2017.json', metric='bbox', backend_args=backend_args) test_evaluator = val_evaluator max_epochs = 300 num_last_epochs = 15 interval = 10 train_cfg = dict(max_epochs=max_epochs, val_interval=interval) base_lr = 0.01 optim_wrapper = dict(type='OptimWrapper', optimizer=dict(type='SGD', lr=base_lr, momentum=0.9, weight_decay=0.0005, nesterov=True), paramwise_cfg=dict(norm_decay_mult=0.0, bias_decay_mult=0.0)) param_scheduler = [dict(type='mmdet.QuadraticWarmupLR', by_epoch=True, begin=0, end=5, convert_to_iter_based=True), dict(type='CosineAnnealingLR', eta_min=base_lr * 0.05, begin=5, T_max=max_epochs - num_last_epochs, end=max_epochs - num_last_epochs, by_epoch=True, convert_to_iter_based=True), dict(type='ConstantLR', by_epoch=True, factor=1, begin=max_epochs - num_last_epochs, end=max_epochs)] default_hooks = dict(checkpoint=dict(interval=interval, max_keep_ckpts=3)) custom_hooks = [dict(type='YOLOXModeSwitchHook', num_last_epochs=num_last_epochs, priority=48), dict(type='SyncNormHook', priority=48), dict(type='EMAHook', ema_type='ExpMomentumEMA', momentum=0.0001, update_buffers=True, priority=49)] auto_scale_lr = dict(base_batch_size=64) # File: controlnet_aux-master/src/controlnet_aux/hed/__init__.py import os import warnings import cv2 import numpy as np import torch from einops import rearrange from huggingface_hub import hf_hub_download from PIL import Image from ..util import HWC3, nms, resize_image, safe_step class DoubleConvBlock(torch.nn.Module): def __init__(self, input_channel, output_channel, layer_number): super().__init__() self.convs = torch.nn.Sequential() self.convs.append(torch.nn.Conv2d(in_channels=input_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1)) for i in range(1, layer_number): self.convs.append(torch.nn.Conv2d(in_channels=output_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1)) self.projection = torch.nn.Conv2d(in_channels=output_channel, out_channels=1, kernel_size=(1, 1), stride=(1, 1), padding=0) def __call__(self, x, down_sampling=False): h = x if down_sampling: h = torch.nn.functional.max_pool2d(h, kernel_size=(2, 2), stride=(2, 2)) for conv in self.convs: h = conv(h) h = torch.nn.functional.relu(h) return (h, self.projection(h)) class ControlNetHED_Apache2(torch.nn.Module): def __init__(self): super().__init__() self.norm = torch.nn.Parameter(torch.zeros(size=(1, 3, 1, 1))) self.block1 = DoubleConvBlock(input_channel=3, output_channel=64, layer_number=2) self.block2 = DoubleConvBlock(input_channel=64, output_channel=128, layer_number=2) self.block3 = DoubleConvBlock(input_channel=128, output_channel=256, layer_number=3) self.block4 = DoubleConvBlock(input_channel=256, output_channel=512, layer_number=3) self.block5 = DoubleConvBlock(input_channel=512, output_channel=512, layer_number=3) def __call__(self, x): h = x - self.norm (h, projection1) = self.block1(h) (h, projection2) = self.block2(h, down_sampling=True) (h, projection3) = self.block3(h, down_sampling=True) (h, projection4) = self.block4(h, down_sampling=True) (h, projection5) = self.block5(h, down_sampling=True) return (projection1, projection2, projection3, projection4, projection5) class HEDdetector: def __init__(self, netNetwork): self.netNetwork = netNetwork @classmethod def from_pretrained(cls, pretrained_model_or_path, filename=None, cache_dir=None, local_files_only=False): filename = filename or 'ControlNetHED.pth' if os.path.isdir(pretrained_model_or_path): model_path = os.path.join(pretrained_model_or_path, filename) else: model_path = hf_hub_download(pretrained_model_or_path, filename, cache_dir=cache_dir, local_files_only=local_files_only) netNetwork = ControlNetHED_Apache2() netNetwork.load_state_dict(torch.load(model_path, map_location='cpu')) netNetwork.float().eval() return cls(netNetwork) def to(self, device): self.netNetwork.to(device) return self def __call__(self, input_image, detect_resolution=512, image_resolution=512, safe=False, output_type='pil', scribble=False, **kwargs): if 'return_pil' in kwargs: warnings.warn('return_pil is deprecated. Use output_type instead.', DeprecationWarning) output_type = 'pil' if kwargs['return_pil'] else 'np' if type(output_type) is bool: warnings.warn('Passing `True` or `False` to `output_type` is deprecated and will raise an error in future versions') if output_type: output_type = 'pil' device = next(iter(self.netNetwork.parameters())).device if not isinstance(input_image, np.ndarray): input_image = np.array(input_image, dtype=np.uint8) input_image = HWC3(input_image) input_image = resize_image(input_image, detect_resolution) assert input_image.ndim == 3 (H, W, C) = input_image.shape with torch.no_grad(): image_hed = torch.from_numpy(input_image.copy()).float().to(device) image_hed = rearrange(image_hed, 'h w c -> 1 c h w') edges = self.netNetwork(image_hed) edges = [e.detach().cpu().numpy().astype(np.float32)[0, 0] for e in edges] edges = [cv2.resize(e, (W, H), interpolation=cv2.INTER_LINEAR) for e in edges] edges = np.stack(edges, axis=2) edge = 1 / (1 + np.exp(-np.mean(edges, axis=2).astype(np.float64))) if safe: edge = safe_step(edge) edge = (edge * 255.0).clip(0, 255).astype(np.uint8) detected_map = edge detected_map = HWC3(detected_map) img = resize_image(input_image, image_resolution) (H, W, C) = img.shape detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR) if scribble: detected_map = nms(detected_map, 127, 3.0) detected_map = cv2.GaussianBlur(detected_map, (0, 0), 3.0) detected_map[detected_map > 4] = 255 detected_map[detected_map < 255] = 0 if output_type == 'pil': detected_map = Image.fromarray(detected_map) return detected_map # File: controlnet_aux-master/src/controlnet_aux/leres/__init__.py import os import cv2 import numpy as np import torch from huggingface_hub import hf_hub_download from PIL import Image from ..util import HWC3, resize_image from .leres.depthmap import estimateboost, estimateleres from .leres.multi_depth_model_woauxi import RelDepthModel from .leres.net_tools import strip_prefix_if_present from .pix2pix.models.pix2pix4depth_model import Pix2Pix4DepthModel from .pix2pix.options.test_options import TestOptions class LeresDetector: def __init__(self, model, pix2pixmodel): self.model = model self.pix2pixmodel = pix2pixmodel @classmethod def from_pretrained(cls, pretrained_model_or_path, filename=None, pix2pix_filename=None, cache_dir=None, local_files_only=False): filename = filename or 'res101.pth' pix2pix_filename = pix2pix_filename or 'latest_net_G.pth' if os.path.isdir(pretrained_model_or_path): model_path = os.path.join(pretrained_model_or_path, filename) else: model_path = hf_hub_download(pretrained_model_or_path, filename, cache_dir=cache_dir, local_files_only=local_files_only) checkpoint = torch.load(model_path, map_location=torch.device('cpu')) model = RelDepthModel(backbone='resnext101') model.load_state_dict(strip_prefix_if_present(checkpoint['depth_model'], 'module.'), strict=True) del checkpoint if os.path.isdir(pretrained_model_or_path): model_path = os.path.join(pretrained_model_or_path, pix2pix_filename) else: model_path = hf_hub_download(pretrained_model_or_path, pix2pix_filename, cache_dir=cache_dir, local_files_only=local_files_only) opt = TestOptions().parse() if not torch.cuda.is_available(): opt.gpu_ids = [] pix2pixmodel = Pix2Pix4DepthModel(opt) pix2pixmodel.save_dir = os.path.dirname(model_path) pix2pixmodel.load_networks('latest') pix2pixmodel.eval() return cls(model, pix2pixmodel) def to(self, device): self.model.to(device) return self def __call__(self, input_image, thr_a=0, thr_b=0, boost=False, detect_resolution=512, image_resolution=512, output_type='pil'): device = next(iter(self.model.parameters())).device if not isinstance(input_image, np.ndarray): input_image = np.array(input_image, dtype=np.uint8) input_image = HWC3(input_image) input_image = resize_image(input_image, detect_resolution) assert input_image.ndim == 3 (height, width, dim) = input_image.shape with torch.no_grad(): if boost: depth = estimateboost(input_image, self.model, 0, self.pix2pixmodel, max(width, height)) else: depth = estimateleres(input_image, self.model, width, height) numbytes = 2 depth_min = depth.min() depth_max = depth.max() max_val = 2 ** (8 * numbytes) - 1 if depth_max - depth_min > np.finfo('float').eps: out = max_val * (depth - depth_min) / (depth_max - depth_min) else: out = np.zeros(depth.shape) depth_image = out.astype('uint16') depth_image = cv2.convertScaleAbs(depth_image, alpha=255.0 / 65535.0) if thr_a != 0: thr_a = thr_a / 100 * 255 depth_image = cv2.threshold(depth_image, thr_a, 255, cv2.THRESH_TOZERO)[1] depth_image = cv2.bitwise_not(depth_image) if thr_b != 0: thr_b = thr_b / 100 * 255 depth_image = cv2.threshold(depth_image, thr_b, 255, cv2.THRESH_TOZERO)[1] detected_map = depth_image detected_map = HWC3(detected_map) img = resize_image(input_image, image_resolution) (H, W, C) = img.shape detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR) if output_type == 'pil': detected_map = Image.fromarray(detected_map) return detected_map # File: controlnet_aux-master/src/controlnet_aux/leres/leres/Resnet.py import torch.nn as nn import torch.nn as NN __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'] model_urls = {'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth'} def conv3x3(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = NN.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = NN.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = NN.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = NN.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False) self.bn3 = NN.BatchNorm2d(planes * self.expansion) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class ResNet(nn.Module): def __init__(self, block, layers, num_classes=1000): self.inplanes = 64 super(ResNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = NN.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential(nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), NN.BatchNorm2d(planes * block.expansion)) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def forward(self, x): features = [] x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) features.append(x) x = self.layer2(x) features.append(x) x = self.layer3(x) features.append(x) x = self.layer4(x) features.append(x) return features def resnet18(pretrained=True, **kwargs): model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) return model def resnet34(pretrained=True, **kwargs): model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) return model def resnet50(pretrained=True, **kwargs): model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) return model def resnet101(pretrained=True, **kwargs): model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) return model def resnet152(pretrained=True, **kwargs): model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs) return model # File: controlnet_aux-master/src/controlnet_aux/leres/leres/Resnext_torch.py import torch.nn as nn try: from urllib import urlretrieve except ImportError: from urllib.request import urlretrieve __all__ = ['resnext101_32x8d'] model_urls = {'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth'} def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation) def conv1x1(in_planes, out_planes, stride=1): return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None): super(BasicBlock, self).__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d if groups != 1 or base_width != 64: raise ValueError('BasicBlock only supports groups=1 and base_width=64') if dilation > 1: raise NotImplementedError('Dilation > 1 not supported in BasicBlock') self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = norm_layer(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = norm_layer(planes) self.downsample = downsample self.stride = stride def forward(self, x): identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: identity = self.downsample(x) out += identity out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None): super(Bottleneck, self).__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d width = int(planes * (base_width / 64.0)) * groups self.conv1 = conv1x1(inplanes, width) self.bn1 = norm_layer(width) self.conv2 = conv3x3(width, width, stride, groups, dilation) self.bn2 = norm_layer(width) self.conv3 = conv1x1(width, planes * self.expansion) self.bn3 = norm_layer(planes * self.expansion) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: identity = self.downsample(x) out += identity out = self.relu(out) return out class ResNet(nn.Module): def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None): super(ResNet, self).__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d self._norm_layer = norm_layer self.inplanes = 64 self.dilation = 1 if replace_stride_with_dilation is None: replace_stride_with_dilation = [False, False, False] if len(replace_stride_with_dilation) != 3: raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation)) self.groups = groups self.base_width = width_per_group self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = norm_layer(self.inplanes) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]) self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]) self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) if zero_init_residual: for m in self.modules(): if isinstance(m, Bottleneck): nn.init.constant_(m.bn3.weight, 0) elif isinstance(m, BasicBlock): nn.init.constant_(m.bn2.weight, 0) def _make_layer(self, block, planes, blocks, stride=1, dilate=False): norm_layer = self._norm_layer downsample = None previous_dilation = self.dilation if dilate: self.dilation *= stride stride = 1 if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential(conv1x1(self.inplanes, planes * block.expansion, stride), norm_layer(planes * block.expansion)) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer)) self.inplanes = planes * block.expansion for _ in range(1, blocks): layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer)) return nn.Sequential(*layers) def _forward_impl(self, x): features = [] x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) features.append(x) x = self.layer2(x) features.append(x) x = self.layer3(x) features.append(x) x = self.layer4(x) features.append(x) return features def forward(self, x): return self._forward_impl(x) def resnext101_32x8d(pretrained=True, **kwargs): kwargs['groups'] = 32 kwargs['width_per_group'] = 8 model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) return model # File: controlnet_aux-master/src/controlnet_aux/leres/leres/depthmap.py import gc from operator import getitem import cv2 import numpy as np import skimage.measure import torch from torchvision.transforms import transforms from ...util import torch_gc whole_size_threshold = 1600 pix2pixsize = 1024 def scale_torch(img): if len(img.shape) == 2: img = img[np.newaxis, :, :] if img.shape[2] == 3: transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) img = transform(img.astype(np.float32)) else: img = img.astype(np.float32) img = torch.from_numpy(img) return img def estimateleres(img, model, w, h): device = next(iter(model.parameters())).device rgb_c = img[:, :, ::-1].copy() A_resize = cv2.resize(rgb_c, (w, h)) img_torch = scale_torch(A_resize)[None, :, :, :] with torch.no_grad(): img_torch = img_torch.to(device) prediction = model.depth_model(img_torch) prediction = prediction.squeeze().cpu().numpy() prediction = cv2.resize(prediction, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_CUBIC) return prediction def generatemask(size): mask = np.zeros(size, dtype=np.float32) sigma = int(size[0] / 16) k_size = int(2 * np.ceil(2 * int(size[0] / 16)) + 1) mask[int(0.15 * size[0]):size[0] - int(0.15 * size[0]), int(0.15 * size[1]):size[1] - int(0.15 * size[1])] = 1 mask = cv2.GaussianBlur(mask, (int(k_size), int(k_size)), sigma) mask = (mask - mask.min()) / (mask.max() - mask.min()) mask = mask.astype(np.float32) return mask def resizewithpool(img, size): i_size = img.shape[0] n = int(np.floor(i_size / size)) out = skimage.measure.block_reduce(img, (n, n), np.max) return out def rgb2gray(rgb): return np.dot(rgb[..., :3], [0.2989, 0.587, 0.114]) def calculateprocessingres(img, basesize, confidence=0.1, scale_threshold=3, whole_size_threshold=3000): speed_scale = 32 image_dim = int(min(img.shape[0:2])) gray = rgb2gray(img) grad = np.abs(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)) + np.abs(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)) grad = cv2.resize(grad, (image_dim, image_dim), cv2.INTER_AREA) m = grad.min() M = grad.max() middle = m + 0.4 * (M - m) grad[grad < middle] = 0 grad[grad >= middle] = 1 kernel = np.ones((int(basesize / speed_scale), int(basesize / speed_scale)), float) kernel2 = np.ones((int(basesize / (4 * speed_scale)), int(basesize / (4 * speed_scale))), float) threshold = min(whole_size_threshold, scale_threshold * max(img.shape[:2])) outputsize_scale = basesize / speed_scale for p_size in range(int(basesize / speed_scale), int(threshold / speed_scale), int(basesize / (2 * speed_scale))): grad_resized = resizewithpool(grad, p_size) grad_resized = cv2.resize(grad_resized, (p_size, p_size), cv2.INTER_NEAREST) grad_resized[grad_resized >= 0.5] = 1 grad_resized[grad_resized < 0.5] = 0 dilated = cv2.dilate(grad_resized, kernel, iterations=1) meanvalue = (1 - dilated).mean() if meanvalue > confidence: break else: outputsize_scale = p_size grad_region = cv2.dilate(grad_resized, kernel2, iterations=1) patch_scale = grad_region.mean() return (int(outputsize_scale * speed_scale), patch_scale) def doubleestimate(img, size1, size2, pix2pixsize, model, net_type, pix2pixmodel): estimate1 = singleestimate(img, size1, model, net_type) estimate1 = cv2.resize(estimate1, (pix2pixsize, pix2pixsize), interpolation=cv2.INTER_CUBIC) estimate2 = singleestimate(img, size2, model, net_type) estimate2 = cv2.resize(estimate2, (pix2pixsize, pix2pixsize), interpolation=cv2.INTER_CUBIC) pix2pixmodel.set_input(estimate1, estimate2) pix2pixmodel.test() visuals = pix2pixmodel.get_current_visuals() prediction_mapped = visuals['fake_B'] prediction_mapped = (prediction_mapped + 1) / 2 prediction_mapped = (prediction_mapped - torch.min(prediction_mapped)) / (torch.max(prediction_mapped) - torch.min(prediction_mapped)) prediction_mapped = prediction_mapped.squeeze().cpu().numpy() return prediction_mapped def singleestimate(img, msize, model, net_type): return estimateleres(img, model, msize, msize) def applyGridpatch(blsize, stride, img, box): counter1 = 0 patch_bound_list = {} for k in range(blsize, img.shape[1] - blsize, stride): for j in range(blsize, img.shape[0] - blsize, stride): patch_bound_list[str(counter1)] = {} patchbounds = [j - blsize, k - blsize, j - blsize + 2 * blsize, k - blsize + 2 * blsize] patch_bound = [box[0] + patchbounds[1], box[1] + patchbounds[0], patchbounds[3] - patchbounds[1], patchbounds[2] - patchbounds[0]] patch_bound_list[str(counter1)]['rect'] = patch_bound patch_bound_list[str(counter1)]['size'] = patch_bound[2] counter1 = counter1 + 1 return patch_bound_list def generatepatchs(img, base_size): img_gray = rgb2gray(img) whole_grad = np.abs(cv2.Sobel(img_gray, cv2.CV_64F, 0, 1, ksize=3)) + np.abs(cv2.Sobel(img_gray, cv2.CV_64F, 1, 0, ksize=3)) threshold = whole_grad[whole_grad > 0].mean() whole_grad[whole_grad < threshold] = 0 gf = whole_grad.sum() / len(whole_grad.reshape(-1)) grad_integral_image = cv2.integral(whole_grad) blsize = int(round(base_size / 2)) stride = int(round(blsize * 0.75)) patch_bound_list = applyGridpatch(blsize, stride, img, [0, 0, 0, 0]) print('Selecting patches ...') patch_bound_list = adaptiveselection(grad_integral_image, patch_bound_list, gf) patchset = sorted(patch_bound_list.items(), key=lambda x: getitem(x[1], 'size'), reverse=True) return patchset def getGF_fromintegral(integralimage, rect): x1 = rect[1] x2 = rect[1] + rect[3] y1 = rect[0] y2 = rect[0] + rect[2] value = integralimage[x2, y2] - integralimage[x1, y2] - integralimage[x2, y1] + integralimage[x1, y1] return value def adaptiveselection(integral_grad, patch_bound_list, gf): patchlist = {} count = 0 (height, width) = integral_grad.shape search_step = int(32 / factor) for c in range(len(patch_bound_list)): bbox = patch_bound_list[str(c)]['rect'] cgf = getGF_fromintegral(integral_grad, bbox) / (bbox[2] * bbox[3]) if cgf >= gf: bbox_test = bbox.copy() patchlist[str(count)] = {} while True: bbox_test[0] = bbox_test[0] - int(search_step / 2) bbox_test[1] = bbox_test[1] - int(search_step / 2) bbox_test[2] = bbox_test[2] + search_step bbox_test[3] = bbox_test[3] + search_step if bbox_test[0] < 0 or bbox_test[1] < 0 or bbox_test[1] + bbox_test[3] >= height or (bbox_test[0] + bbox_test[2] >= width): break cgf = getGF_fromintegral(integral_grad, bbox_test) / (bbox_test[2] * bbox_test[3]) if cgf < gf: break bbox = bbox_test.copy() patchlist[str(count)]['rect'] = bbox patchlist[str(count)]['size'] = bbox[2] count = count + 1 return patchlist def impatch(image, rect): w1 = rect[0] h1 = rect[1] w2 = w1 + rect[2] h2 = h1 + rect[3] image_patch = image[h1:h2, w1:w2] return image_patch class ImageandPatchs: def __init__(self, root_dir, name, patchsinfo, rgb_image, scale=1): self.root_dir = root_dir self.patchsinfo = patchsinfo self.name = name self.patchs = patchsinfo self.scale = scale self.rgb_image = cv2.resize(rgb_image, (round(rgb_image.shape[1] * scale), round(rgb_image.shape[0] * scale)), interpolation=cv2.INTER_CUBIC) self.do_have_estimate = False self.estimation_updated_image = None self.estimation_base_image = None def __len__(self): return len(self.patchs) def set_base_estimate(self, est): self.estimation_base_image = est if self.estimation_updated_image is not None: self.do_have_estimate = True def set_updated_estimate(self, est): self.estimation_updated_image = est if self.estimation_base_image is not None: self.do_have_estimate = True def __getitem__(self, index): patch_id = int(self.patchs[index][0]) rect = np.array(self.patchs[index][1]['rect']) msize = self.patchs[index][1]['size'] rect = np.round(rect * self.scale) rect = rect.astype('int') msize = round(msize * self.scale) patch_rgb = impatch(self.rgb_image, rect) if self.do_have_estimate: patch_whole_estimate_base = impatch(self.estimation_base_image, rect) patch_whole_estimate_updated = impatch(self.estimation_updated_image, rect) return {'patch_rgb': patch_rgb, 'patch_whole_estimate_base': patch_whole_estimate_base, 'patch_whole_estimate_updated': patch_whole_estimate_updated, 'rect': rect, 'size': msize, 'id': patch_id} else: return {'patch_rgb': patch_rgb, 'rect': rect, 'size': msize, 'id': patch_id} def print_options(self, opt): message = '' message += '----------------- Options ---------------\n' for (k, v) in sorted(vars(opt).items()): comment = '' default = self.parser.get_default(k) if v != default: comment = '\t[default: %s]' % str(default) message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment) message += '----------------- End -------------------' print(message) '' def parse(self): opt = self.gather_options() opt.isTrain = self.isTrain if opt.suffix: suffix = '_' + opt.suffix.format(**vars(opt)) if opt.suffix != '' else '' opt.name = opt.name + suffix str_ids = opt.gpu_ids.split(',') opt.gpu_ids = [] for str_id in str_ids: id = int(str_id) if id >= 0: opt.gpu_ids.append(id) self.opt = opt return self.opt def estimateboost(img, model, model_type, pix2pixmodel, max_res=512, depthmap_script_boost_rmax=None): global whole_size_threshold if depthmap_script_boost_rmax: whole_size_threshold = depthmap_script_boost_rmax if model_type == 0: net_receptive_field_size = 448 patch_netsize = 2 * net_receptive_field_size elif model_type == 1: net_receptive_field_size = 512 patch_netsize = 2 * net_receptive_field_size else: net_receptive_field_size = 384 patch_netsize = 2 * net_receptive_field_size gc.collect() torch_gc() mask_org = generatemask((3000, 3000)) mask = mask_org.copy() r_threshold_value = 0.2 input_resolution = img.shape scale_threshold = 3 (whole_image_optimal_size, patch_scale) = calculateprocessingres(img, net_receptive_field_size, r_threshold_value, scale_threshold, whole_size_threshold) whole_estimate = doubleestimate(img, net_receptive_field_size, whole_image_optimal_size, pix2pixsize, model, model_type, pix2pixmodel) global factor factor = max(min(1, 4 * patch_scale * whole_image_optimal_size / whole_size_threshold), 0.2) if max_res < whole_image_optimal_size: return cv2.resize(whole_estimate, (input_resolution[1], input_resolution[0]), interpolation=cv2.INTER_CUBIC) if img.shape[0] > img.shape[1]: a = 2 * whole_image_optimal_size b = round(2 * whole_image_optimal_size * img.shape[1] / img.shape[0]) else: a = round(2 * whole_image_optimal_size * img.shape[0] / img.shape[1]) b = 2 * whole_image_optimal_size b = int(round(b / factor)) a = int(round(a / factor)) '' img = cv2.resize(img, (b, a), interpolation=cv2.INTER_CUBIC) base_size = net_receptive_field_size * 2 patchset = generatepatchs(img, base_size) '' mergein_scale = input_resolution[0] / img.shape[0] imageandpatchs = ImageandPatchs('', '', patchset, img, mergein_scale) whole_estimate_resized = cv2.resize(whole_estimate, (round(img.shape[1] * mergein_scale), round(img.shape[0] * mergein_scale)), interpolation=cv2.INTER_CUBIC) imageandpatchs.set_base_estimate(whole_estimate_resized.copy()) imageandpatchs.set_updated_estimate(whole_estimate_resized.copy()) print('Resulting depthmap resolution will be :', whole_estimate_resized.shape[:2]) print('Patches to process: ' + str(len(imageandpatchs))) for patch_ind in range(len(imageandpatchs)): patch = imageandpatchs[patch_ind] patch_rgb = patch['patch_rgb'] patch_whole_estimate_base = patch['patch_whole_estimate_base'] rect = patch['rect'] patch_id = patch['id'] org_size = patch_whole_estimate_base.shape print('\t Processing patch', patch_ind, '/', len(imageandpatchs) - 1, '|', rect) patch_estimation = doubleestimate(patch_rgb, net_receptive_field_size, patch_netsize, pix2pixsize, model, model_type, pix2pixmodel) patch_estimation = cv2.resize(patch_estimation, (pix2pixsize, pix2pixsize), interpolation=cv2.INTER_CUBIC) patch_whole_estimate_base = cv2.resize(patch_whole_estimate_base, (pix2pixsize, pix2pixsize), interpolation=cv2.INTER_CUBIC) pix2pixmodel.set_input(patch_whole_estimate_base, patch_estimation) pix2pixmodel.test() visuals = pix2pixmodel.get_current_visuals() prediction_mapped = visuals['fake_B'] prediction_mapped = (prediction_mapped + 1) / 2 prediction_mapped = prediction_mapped.squeeze().cpu().numpy() mapped = prediction_mapped p_coef = np.polyfit(mapped.reshape(-1), patch_whole_estimate_base.reshape(-1), deg=1) merged = np.polyval(p_coef, mapped.reshape(-1)).reshape(mapped.shape) merged = cv2.resize(merged, (org_size[1], org_size[0]), interpolation=cv2.INTER_CUBIC) w1 = rect[0] h1 = rect[1] w2 = w1 + rect[2] h2 = h1 + rect[3] if mask.shape != org_size: mask = cv2.resize(mask_org, (org_size[1], org_size[0]), interpolation=cv2.INTER_LINEAR) tobemergedto = imageandpatchs.estimation_updated_image tobemergedto[h1:h2, w1:w2] = np.multiply(tobemergedto[h1:h2, w1:w2], 1 - mask) + np.multiply(merged, mask) imageandpatchs.set_updated_estimate(tobemergedto) return cv2.resize(imageandpatchs.estimation_updated_image, (input_resolution[1], input_resolution[0]), interpolation=cv2.INTER_CUBIC) # File: controlnet_aux-master/src/controlnet_aux/leres/leres/multi_depth_model_woauxi.py import torch import torch.nn as nn from . import network_auxi as network from .net_tools import get_func class RelDepthModel(nn.Module): def __init__(self, backbone='resnet50'): super(RelDepthModel, self).__init__() if backbone == 'resnet50': encoder = 'resnet50_stride32' elif backbone == 'resnext101': encoder = 'resnext101_stride32x8d' self.depth_model = DepthModel(encoder) def inference(self, rgb): with torch.no_grad(): input = rgb.to(self.depth_model.device) depth = self.depth_model(input) return depth class DepthModel(nn.Module): def __init__(self, encoder): super(DepthModel, self).__init__() backbone = network.__name__.split('.')[-1] + '.' + encoder self.encoder_modules = get_func(backbone)() self.decoder_modules = network.Decoder() def forward(self, x): lateral_out = self.encoder_modules(x) out_logit = self.decoder_modules(lateral_out) return out_logit # File: controlnet_aux-master/src/controlnet_aux/leres/leres/net_tools.py import importlib import torch import os from collections import OrderedDict def get_func(func_name): if func_name == '': return None try: parts = func_name.split('.') if len(parts) == 1: return globals()[parts[0]] module_name = 'controlnet_aux.leres.leres.' + '.'.join(parts[:-1]) module = importlib.import_module(module_name) return getattr(module, parts[-1]) except Exception: print('Failed to f1ind function: %s', func_name) raise def load_ckpt(args, depth_model, shift_model, focal_model): if os.path.isfile(args.load_ckpt): print('loading checkpoint %s' % args.load_ckpt) checkpoint = torch.load(args.load_ckpt) if shift_model is not None: shift_model.load_state_dict(strip_prefix_if_present(checkpoint['shift_model'], 'module.'), strict=True) if focal_model is not None: focal_model.load_state_dict(strip_prefix_if_present(checkpoint['focal_model'], 'module.'), strict=True) depth_model.load_state_dict(strip_prefix_if_present(checkpoint['depth_model'], 'module.'), strict=True) del checkpoint if torch.cuda.is_available(): torch.cuda.empty_cache() def strip_prefix_if_present(state_dict, prefix): keys = sorted(state_dict.keys()) if not all((key.startswith(prefix) for key in keys)): return state_dict stripped_state_dict = OrderedDict() for (key, value) in state_dict.items(): stripped_state_dict[key.replace(prefix, '')] = value return stripped_state_dict # File: controlnet_aux-master/src/controlnet_aux/leres/leres/network_auxi.py import torch import torch.nn as nn import torch.nn.init as init from . import Resnet, Resnext_torch def resnet50_stride32(): return DepthNet(backbone='resnet', depth=50, upfactors=[2, 2, 2, 2]) def resnext101_stride32x8d(): return DepthNet(backbone='resnext101_32x8d', depth=101, upfactors=[2, 2, 2, 2]) class Decoder(nn.Module): def __init__(self): super(Decoder, self).__init__() self.inchannels = [256, 512, 1024, 2048] self.midchannels = [256, 256, 256, 512] self.upfactors = [2, 2, 2, 2] self.outchannels = 1 self.conv = FTB(inchannels=self.inchannels[3], midchannels=self.midchannels[3]) self.conv1 = nn.Conv2d(in_channels=self.midchannels[3], out_channels=self.midchannels[2], kernel_size=3, padding=1, stride=1, bias=True) self.upsample = nn.Upsample(scale_factor=self.upfactors[3], mode='bilinear', align_corners=True) self.ffm2 = FFM(inchannels=self.inchannels[2], midchannels=self.midchannels[2], outchannels=self.midchannels[2], upfactor=self.upfactors[2]) self.ffm1 = FFM(inchannels=self.inchannels[1], midchannels=self.midchannels[1], outchannels=self.midchannels[1], upfactor=self.upfactors[1]) self.ffm0 = FFM(inchannels=self.inchannels[0], midchannels=self.midchannels[0], outchannels=self.midchannels[0], upfactor=self.upfactors[0]) self.outconv = AO(inchannels=self.midchannels[0], outchannels=self.outchannels, upfactor=2) self._init_params() def _init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.ConvTranspose2d): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, features): x_32x = self.conv(features[3]) x_32 = self.conv1(x_32x) x_16 = self.upsample(x_32) x_8 = self.ffm2(features[2], x_16) x_4 = self.ffm1(features[1], x_8) x_2 = self.ffm0(features[0], x_4) x = self.outconv(x_2) return x class DepthNet(nn.Module): __factory = {18: Resnet.resnet18, 34: Resnet.resnet34, 50: Resnet.resnet50, 101: Resnet.resnet101, 152: Resnet.resnet152} def __init__(self, backbone='resnet', depth=50, upfactors=[2, 2, 2, 2]): super(DepthNet, self).__init__() self.backbone = backbone self.depth = depth self.pretrained = False self.inchannels = [256, 512, 1024, 2048] self.midchannels = [256, 256, 256, 512] self.upfactors = upfactors self.outchannels = 1 if self.backbone == 'resnet': if self.depth not in DepthNet.__factory: raise KeyError('Unsupported depth:', self.depth) self.encoder = DepthNet.__factory[depth](pretrained=self.pretrained) elif self.backbone == 'resnext101_32x8d': self.encoder = Resnext_torch.resnext101_32x8d(pretrained=self.pretrained) else: self.encoder = Resnext_torch.resnext101(pretrained=self.pretrained) def forward(self, x): x = self.encoder(x) return x class FTB(nn.Module): def __init__(self, inchannels, midchannels=512): super(FTB, self).__init__() self.in1 = inchannels self.mid = midchannels self.conv1 = nn.Conv2d(in_channels=self.in1, out_channels=self.mid, kernel_size=3, padding=1, stride=1, bias=True) self.conv_branch = nn.Sequential(nn.ReLU(inplace=True), nn.Conv2d(in_channels=self.mid, out_channels=self.mid, kernel_size=3, padding=1, stride=1, bias=True), nn.BatchNorm2d(num_features=self.mid), nn.ReLU(inplace=True), nn.Conv2d(in_channels=self.mid, out_channels=self.mid, kernel_size=3, padding=1, stride=1, bias=True)) self.relu = nn.ReLU(inplace=True) self.init_params() def forward(self, x): x = self.conv1(x) x = x + self.conv_branch(x) x = self.relu(x) return x def init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.ConvTranspose2d): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) class ATA(nn.Module): def __init__(self, inchannels, reduction=8): super(ATA, self).__init__() self.inchannels = inchannels self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc = nn.Sequential(nn.Linear(self.inchannels * 2, self.inchannels // reduction), nn.ReLU(inplace=True), nn.Linear(self.inchannels // reduction, self.inchannels), nn.Sigmoid()) self.init_params() def forward(self, low_x, high_x): (n, c, _, _) = low_x.size() x = torch.cat([low_x, high_x], 1) x = self.avg_pool(x) x = x.view(n, -1) x = self.fc(x).view(n, c, 1, 1) x = low_x * x + high_x return x def init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.xavier_normal_(m.weight) if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.ConvTranspose2d): init.xavier_normal_(m.weight) if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) class FFM(nn.Module): def __init__(self, inchannels, midchannels, outchannels, upfactor=2): super(FFM, self).__init__() self.inchannels = inchannels self.midchannels = midchannels self.outchannels = outchannels self.upfactor = upfactor self.ftb1 = FTB(inchannels=self.inchannels, midchannels=self.midchannels) self.ftb2 = FTB(inchannels=self.midchannels, midchannels=self.outchannels) self.upsample = nn.Upsample(scale_factor=self.upfactor, mode='bilinear', align_corners=True) self.init_params() def forward(self, low_x, high_x): x = self.ftb1(low_x) x = x + high_x x = self.ftb2(x) x = self.upsample(x) return x def init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.ConvTranspose2d): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) class AO(nn.Module): def __init__(self, inchannels, outchannels, upfactor=2): super(AO, self).__init__() self.inchannels = inchannels self.outchannels = outchannels self.upfactor = upfactor self.adapt_conv = nn.Sequential(nn.Conv2d(in_channels=self.inchannels, out_channels=self.inchannels // 2, kernel_size=3, padding=1, stride=1, bias=True), nn.BatchNorm2d(num_features=self.inchannels // 2), nn.ReLU(inplace=True), nn.Conv2d(in_channels=self.inchannels // 2, out_channels=self.outchannels, kernel_size=3, padding=1, stride=1, bias=True), nn.Upsample(scale_factor=self.upfactor, mode='bilinear', align_corners=True)) self.init_params() def forward(self, x): x = self.adapt_conv(x) return x def init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.ConvTranspose2d): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) class ResidualConv(nn.Module): def __init__(self, inchannels): super(ResidualConv, self).__init__() self.conv = nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(in_channels=inchannels, out_channels=inchannels / 2, kernel_size=3, padding=1, stride=1, bias=False), nn.BatchNorm2d(num_features=inchannels / 2), nn.ReLU(inplace=False), nn.Conv2d(in_channels=inchannels / 2, out_channels=inchannels, kernel_size=3, padding=1, stride=1, bias=False)) self.init_params() def forward(self, x): x = self.conv(x) + x return x def init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.ConvTranspose2d): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) class FeatureFusion(nn.Module): def __init__(self, inchannels, outchannels): super(FeatureFusion, self).__init__() self.conv = ResidualConv(inchannels=inchannels) self.up = nn.Sequential(ResidualConv(inchannels=inchannels), nn.ConvTranspose2d(in_channels=inchannels, out_channels=outchannels, kernel_size=3, stride=2, padding=1, output_padding=1), nn.BatchNorm2d(num_features=outchannels), nn.ReLU(inplace=True)) def forward(self, lowfeat, highfeat): return self.up(highfeat + self.conv(lowfeat)) def init_params(self): for m in self.modules(): if isinstance(m, nn.Conv2d): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.ConvTranspose2d): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): init.constant_(m.weight, 1) init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): init.normal_(m.weight, std=0.01) if m.bias is not None: init.constant_(m.bias, 0) class SenceUnderstand(nn.Module): def __init__(self, channels): super(SenceUnderstand, self).__init__() self.channels = channels self.conv1 = nn.Sequential(nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1), nn.ReLU(inplace=True)) self.pool = nn.AdaptiveAvgPool2d(8) self.fc = nn.Sequential(nn.Linear(512 * 8 * 8, self.channels), nn.ReLU(inplace=True)) self.conv2 = nn.Sequential(nn.Conv2d(in_channels=self.channels, out_channels=self.channels, kernel_size=1, padding=0), nn.ReLU(inplace=True)) self.initial_params() def forward(self, x): (n, c, h, w) = x.size() x = self.conv1(x) x = self.pool(x) x = x.view(n, -1) x = self.fc(x) x = x.view(n, self.channels, 1, 1) x = self.conv2(x) x = x.repeat(1, 1, h, w) return x def initial_params(self, dev=0.01): for m in self.modules(): if isinstance(m, nn.Conv2d): m.weight.data.normal_(0, dev) if m.bias is not None: m.bias.data.fill_(0) elif isinstance(m, nn.ConvTranspose2d): m.weight.data.normal_(0, dev) if m.bias is not None: m.bias.data.fill_(0) elif isinstance(m, nn.Linear): m.weight.data.normal_(0, dev) if __name__ == '__main__': net = DepthNet(depth=50, pretrained=True) print(net) inputs = torch.ones(4, 3, 128, 128) out = net(inputs) print(out.size()) # File: controlnet_aux-master/src/controlnet_aux/leres/pix2pix/models/__init__.py """""" import importlib from .base_model import BaseModel def find_model_using_name(model_name): model_filename = 'controlnet_aux.leres.pix2pix.models.' + model_name + '_model' modellib = importlib.import_module(model_filename) model = None target_model_name = model_name.replace('_', '') + 'model' for (name, cls) in modellib.__dict__.items(): if name.lower() == target_model_name.lower() and issubclass(cls, BaseModel): model = cls if model is None: print('In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase.' % (model_filename, target_model_name)) exit(0) return model def get_option_setter(model_name): model_class = find_model_using_name(model_name) return model_class.modify_commandline_options def create_model(opt): model = find_model_using_name(opt.model) instance = model(opt) print('model [%s] was created' % type(instance).__name__) return instance # File: controlnet_aux-master/src/controlnet_aux/leres/pix2pix/models/base_model.py import gc import os from abc import ABC, abstractmethod from collections import OrderedDict import torch from ....util import torch_gc from . import networks class BaseModel(ABC): def __init__(self, opt): self.opt = opt self.gpu_ids = opt.gpu_ids self.isTrain = opt.isTrain self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) if opt.preprocess != 'scale_width': torch.backends.cudnn.benchmark = True self.loss_names = [] self.model_names = [] self.visual_names = [] self.optimizers = [] self.image_paths = [] self.metric = 0 @staticmethod def modify_commandline_options(parser, is_train): return parser @abstractmethod def set_input(self, input): pass @abstractmethod def forward(self): pass @abstractmethod def optimize_parameters(self): pass def setup(self, opt): if self.isTrain: self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers] if not self.isTrain or opt.continue_train: load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch self.load_networks(load_suffix) self.print_networks(opt.verbose) def eval(self): for name in self.model_names: if isinstance(name, str): net = getattr(self, 'net' + name) net.eval() def test(self): with torch.no_grad(): self.forward() self.compute_visuals() def compute_visuals(self): pass def get_image_paths(self): return self.image_paths def update_learning_rate(self): old_lr = self.optimizers[0].param_groups[0]['lr'] for scheduler in self.schedulers: if self.opt.lr_policy == 'plateau': scheduler.step(self.metric) else: scheduler.step() lr = self.optimizers[0].param_groups[0]['lr'] print('learning rate %.7f -> %.7f' % (old_lr, lr)) def get_current_visuals(self): visual_ret = OrderedDict() for name in self.visual_names: if isinstance(name, str): visual_ret[name] = getattr(self, name) return visual_ret def get_current_losses(self): errors_ret = OrderedDict() for name in self.loss_names: if isinstance(name, str): errors_ret[name] = float(getattr(self, 'loss_' + name)) return errors_ret def save_networks(self, epoch): for name in self.model_names: if isinstance(name, str): save_filename = '%s_net_%s.pth' % (epoch, name) save_path = os.path.join(self.save_dir, save_filename) net = getattr(self, 'net' + name) if len(self.gpu_ids) > 0 and torch.cuda.is_available(): torch.save(net.module.cpu().state_dict(), save_path) net.cuda(self.gpu_ids[0]) else: torch.save(net.cpu().state_dict(), save_path) def unload_network(self, name): if isinstance(name, str): net = getattr(self, 'net' + name) del net gc.collect() torch_gc() return None def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0): key = keys[i] if i + 1 == len(keys): if module.__class__.__name__.startswith('InstanceNorm') and (key == 'running_mean' or key == 'running_var'): if getattr(module, key) is None: state_dict.pop('.'.join(keys)) if module.__class__.__name__.startswith('InstanceNorm') and key == 'num_batches_tracked': state_dict.pop('.'.join(keys)) else: self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) def load_networks(self, epoch): for name in self.model_names: if isinstance(name, str): load_filename = '%s_net_%s.pth' % (epoch, name) load_path = os.path.join(self.save_dir, load_filename) net = getattr(self, 'net' + name) if isinstance(net, torch.nn.DataParallel): net = net.module state_dict = torch.load(load_path, map_location=str(self.device)) if hasattr(state_dict, '_metadata'): del state_dict._metadata for key in list(state_dict.keys()): self.__patch_instance_norm_state_dict(state_dict, net, key.split('.')) net.load_state_dict(state_dict) def print_networks(self, verbose): print('---------- Networks initialized -------------') for name in self.model_names: if isinstance(name, str): net = getattr(self, 'net' + name) num_params = 0 for param in net.parameters(): num_params += param.numel() if verbose: print(net) print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1000000.0)) print('-----------------------------------------------') def set_requires_grad(self, nets, requires_grad=False): if not isinstance(nets, list): nets = [nets] for net in nets: if net is not None: for param in net.parameters(): param.requires_grad = requires_grad # File: controlnet_aux-master/src/controlnet_aux/leres/pix2pix/models/base_model_hg.py import os import torch class BaseModelHG: def name(self): return 'BaseModel' def initialize(self, opt): self.opt = opt self.gpu_ids = opt.gpu_ids self.isTrain = opt.isTrain self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) def set_input(self, input): self.input = input def forward(self): pass def test(self): pass def get_image_paths(self): pass def optimize_parameters(self): pass def get_current_visuals(self): return self.input def get_current_errors(self): return {} def save(self, label): pass def save_network(self, network, network_label, epoch_label, gpu_ids): save_filename = '_%s_net_%s.pth' % (epoch_label, network_label) save_path = os.path.join(self.save_dir, save_filename) torch.save(network.cpu().state_dict(), save_path) if len(gpu_ids) and torch.cuda.is_available(): network.cuda(device_id=gpu_ids[0]) def load_network(self, network, network_label, epoch_label): save_filename = '%s_net_%s.pth' % (epoch_label, network_label) save_path = os.path.join(self.save_dir, save_filename) print(save_path) model = torch.load(save_path) return model def update_learning_rate(): pass # File: controlnet_aux-master/src/controlnet_aux/leres/pix2pix/models/networks.py import torch import torch.nn as nn from torch.nn import init import functools from torch.optim import lr_scheduler class Identity(nn.Module): def forward(self, x): return x def get_norm_layer(norm_type='instance'): if norm_type == 'batch': norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True) elif norm_type == 'instance': norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False) elif norm_type == 'none': def norm_layer(x): return Identity() else: raise NotImplementedError('normalization layer [%s] is not found' % norm_type) return norm_layer def get_scheduler(optimizer, opt): if opt.lr_policy == 'linear': def lambda_rule(epoch): lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1) return lr_l scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule) elif opt.lr_policy == 'step': scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1) elif opt.lr_policy == 'plateau': scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5) elif opt.lr_policy == 'cosine': scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0) else: return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy) return scheduler def init_weights(net, init_type='normal', init_gain=0.02): def init_func(m): classname = m.__class__.__name__ if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): if init_type == 'normal': init.normal_(m.weight.data, 0.0, init_gain) elif init_type == 'xavier': init.xavier_normal_(m.weight.data, gain=init_gain) elif init_type == 'kaiming': init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') elif init_type == 'orthogonal': init.orthogonal_(m.weight.data, gain=init_gain) else: raise NotImplementedError('initialization method [%s] is not implemented' % init_type) if hasattr(m, 'bias') and m.bias is not None: init.constant_(m.bias.data, 0.0) elif classname.find('BatchNorm2d') != -1: init.normal_(m.weight.data, 1.0, init_gain) init.constant_(m.bias.data, 0.0) net.apply(init_func) def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]): if len(gpu_ids) > 0: assert torch.cuda.is_available() net.to(gpu_ids[0]) net = torch.nn.DataParallel(net, gpu_ids) init_weights(net, init_type, init_gain=init_gain) return net def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]): net = None norm_layer = get_norm_layer(norm_type=norm) if netG == 'resnet_9blocks': net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9) elif netG == 'resnet_6blocks': net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6) elif netG == 'resnet_12blocks': net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=12) elif netG == 'unet_128': net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout) elif netG == 'unet_256': net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout) elif netG == 'unet_672': net = UnetGenerator(input_nc, output_nc, 5, ngf, norm_layer=norm_layer, use_dropout=use_dropout) elif netG == 'unet_960': net = UnetGenerator(input_nc, output_nc, 6, ngf, norm_layer=norm_layer, use_dropout=use_dropout) elif netG == 'unet_1024': net = UnetGenerator(input_nc, output_nc, 10, ngf, norm_layer=norm_layer, use_dropout=use_dropout) else: raise NotImplementedError('Generator model name [%s] is not recognized' % netG) return init_net(net, init_type, init_gain, gpu_ids) def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]): net = None norm_layer = get_norm_layer(norm_type=norm) if netD == 'basic': net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer) elif netD == 'n_layers': net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer) elif netD == 'pixel': net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer) else: raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD) return init_net(net, init_type, init_gain, gpu_ids) class GANLoss(nn.Module): def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0): super(GANLoss, self).__init__() self.register_buffer('real_label', torch.tensor(target_real_label)) self.register_buffer('fake_label', torch.tensor(target_fake_label)) self.gan_mode = gan_mode if gan_mode == 'lsgan': self.loss = nn.MSELoss() elif gan_mode == 'vanilla': self.loss = nn.BCEWithLogitsLoss() elif gan_mode in ['wgangp']: self.loss = None else: raise NotImplementedError('gan mode %s not implemented' % gan_mode) def get_target_tensor(self, prediction, target_is_real): if target_is_real: target_tensor = self.real_label else: target_tensor = self.fake_label return target_tensor.expand_as(prediction) def __call__(self, prediction, target_is_real): if self.gan_mode in ['lsgan', 'vanilla']: target_tensor = self.get_target_tensor(prediction, target_is_real) loss = self.loss(prediction, target_tensor) elif self.gan_mode == 'wgangp': if target_is_real: loss = -prediction.mean() else: loss = prediction.mean() return loss def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0): if lambda_gp > 0.0: if type == 'real': interpolatesv = real_data elif type == 'fake': interpolatesv = fake_data elif type == 'mixed': alpha = torch.rand(real_data.shape[0], 1, device=device) alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape) interpolatesv = alpha * real_data + (1 - alpha) * fake_data else: raise NotImplementedError('{} not implemented'.format(type)) interpolatesv.requires_grad_(True) disc_interpolates = netD(interpolatesv) gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv, grad_outputs=torch.ones(disc_interpolates.size()).to(device), create_graph=True, retain_graph=True, only_inputs=True) gradients = gradients[0].view(real_data.size(0), -1) gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp return (gradient_penalty, gradients) else: return (0.0, None) class ResnetGenerator(nn.Module): def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'): assert n_blocks >= 0 super(ResnetGenerator, self).__init__() if type(norm_layer) == functools.partial: use_bias = norm_layer.func == nn.InstanceNorm2d else: use_bias = norm_layer == nn.InstanceNorm2d model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), norm_layer(ngf), nn.ReLU(True)] n_downsampling = 2 for i in range(n_downsampling): mult = 2 ** i model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), norm_layer(ngf * mult * 2), nn.ReLU(True)] mult = 2 ** n_downsampling for i in range(n_blocks): model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] for i in range(n_downsampling): mult = 2 ** (n_downsampling - i) model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1, bias=use_bias), norm_layer(int(ngf * mult / 2)), nn.ReLU(True)] model += [nn.ReflectionPad2d(3)] model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] model += [nn.Tanh()] self.model = nn.Sequential(*model) def forward(self, input): return self.model(input) class ResnetBlock(nn.Module): def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias): super(ResnetBlock, self).__init__() self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias) def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias): conv_block = [] p = 0 if padding_type == 'reflect': conv_block += [nn.ReflectionPad2d(1)] elif padding_type == 'replicate': conv_block += [nn.ReplicationPad2d(1)] elif padding_type == 'zero': p = 1 else: raise NotImplementedError('padding [%s] is not implemented' % padding_type) conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)] if use_dropout: conv_block += [nn.Dropout(0.5)] p = 0 if padding_type == 'reflect': conv_block += [nn.ReflectionPad2d(1)] elif padding_type == 'replicate': conv_block += [nn.ReplicationPad2d(1)] elif padding_type == 'zero': p = 1 else: raise NotImplementedError('padding [%s] is not implemented' % padding_type) conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)] return nn.Sequential(*conv_block) def forward(self, x): out = x + self.conv_block(x) return out class UnetGenerator(nn.Module): def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False): super(UnetGenerator, self).__init__() unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) for i in range(num_downs - 5): unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout) unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer) unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer) unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer) self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) def forward(self, input): return self.model(input) class UnetSkipConnectionBlock(nn.Module): def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False): super(UnetSkipConnectionBlock, self).__init__() self.outermost = outermost if type(norm_layer) == functools.partial: use_bias = norm_layer.func == nn.InstanceNorm2d else: use_bias = norm_layer == nn.InstanceNorm2d if input_nc is None: input_nc = outer_nc downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias) downrelu = nn.LeakyReLU(0.2, True) downnorm = norm_layer(inner_nc) uprelu = nn.ReLU(True) upnorm = norm_layer(outer_nc) if outermost: upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1) down = [downconv] up = [uprelu, upconv, nn.Tanh()] model = down + [submodule] + up elif innermost: upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias) down = [downrelu, downconv] up = [uprelu, upconv, upnorm] model = down + up else: upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias) down = [downrelu, downconv, downnorm] up = [uprelu, upconv, upnorm] if use_dropout: model = down + [submodule] + up + [nn.Dropout(0.5)] else: model = down + [submodule] + up self.model = nn.Sequential(*model) def forward(self, x): if self.outermost: return self.model(x) else: return torch.cat([x, self.model(x)], 1) class NLayerDiscriminator(nn.Module): def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d): super(NLayerDiscriminator, self).__init__() if type(norm_layer) == functools.partial: use_bias = norm_layer.func == nn.InstanceNorm2d else: use_bias = norm_layer == nn.InstanceNorm2d kw = 4 padw = 1 sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)] nf_mult = 1 nf_mult_prev = 1 for n in range(1, n_layers): nf_mult_prev = nf_mult nf_mult = min(2 ** n, 8) sequence += [nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias), norm_layer(ndf * nf_mult), nn.LeakyReLU(0.2, True)] nf_mult_prev = nf_mult nf_mult = min(2 ** n_layers, 8) sequence += [nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias), norm_layer(ndf * nf_mult), nn.LeakyReLU(0.2, True)] sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] self.model = nn.Sequential(*sequence) def forward(self, input): return self.model(input) class PixelDiscriminator(nn.Module): def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d): super(PixelDiscriminator, self).__init__() if type(norm_layer) == functools.partial: use_bias = norm_layer.func == nn.InstanceNorm2d else: use_bias = norm_layer == nn.InstanceNorm2d self.net = [nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0), nn.LeakyReLU(0.2, True), nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias), norm_layer(ndf * 2), nn.LeakyReLU(0.2, True), nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)] self.net = nn.Sequential(*self.net) def forward(self, input): return self.net(input) # File: controlnet_aux-master/src/controlnet_aux/leres/pix2pix/models/pix2pix4depth_model.py import torch from .base_model import BaseModel from . import networks class Pix2Pix4DepthModel(BaseModel): @staticmethod def modify_commandline_options(parser, is_train=True): parser.set_defaults(input_nc=2, output_nc=1, norm='none', netG='unet_1024', dataset_mode='depthmerge') if is_train: parser.set_defaults(pool_size=0, gan_mode='vanilla') parser.add_argument('--lambda_L1', type=float, default=1000, help='weight for L1 loss') return parser def __init__(self, opt): BaseModel.__init__(self, opt) self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake'] if self.isTrain: self.visual_names = ['outer', 'inner', 'fake_B', 'real_B'] else: self.visual_names = ['fake_B'] if self.isTrain: self.model_names = ['G', 'D'] else: self.model_names = ['G'] self.netG = networks.define_G(opt.input_nc, opt.output_nc, 64, 'unet_1024', 'none', False, 'normal', 0.02, self.gpu_ids) if self.isTrain: self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) if self.isTrain: self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) self.criterionL1 = torch.nn.L1Loss() self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=0.0001, betas=(opt.beta1, 0.999)) self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=2e-06, betas=(opt.beta1, 0.999)) self.optimizers.append(self.optimizer_G) self.optimizers.append(self.optimizer_D) def set_input_train(self, input): self.outer = input['data_outer'].to(self.device) self.outer = torch.nn.functional.interpolate(self.outer, (1024, 1024), mode='bilinear', align_corners=False) self.inner = input['data_inner'].to(self.device) self.inner = torch.nn.functional.interpolate(self.inner, (1024, 1024), mode='bilinear', align_corners=False) self.image_paths = input['image_path'] if self.isTrain: self.gtfake = input['data_gtfake'].to(self.device) self.gtfake = torch.nn.functional.interpolate(self.gtfake, (1024, 1024), mode='bilinear', align_corners=False) self.real_B = self.gtfake self.real_A = torch.cat((self.outer, self.inner), 1) def set_input(self, outer, inner): inner = torch.from_numpy(inner).unsqueeze(0).unsqueeze(0) outer = torch.from_numpy(outer).unsqueeze(0).unsqueeze(0) inner = (inner - torch.min(inner)) / (torch.max(inner) - torch.min(inner)) outer = (outer - torch.min(outer)) / (torch.max(outer) - torch.min(outer)) inner = self.normalize(inner) outer = self.normalize(outer) self.real_A = torch.cat((outer, inner), 1).to(self.device) def normalize(self, input): input = input * 2 input = input - 1 return input def forward(self): self.fake_B = self.netG(self.real_A) def backward_D(self): fake_AB = torch.cat((self.real_A, self.fake_B), 1) pred_fake = self.netD(fake_AB.detach()) self.loss_D_fake = self.criterionGAN(pred_fake, False) real_AB = torch.cat((self.real_A, self.real_B), 1) pred_real = self.netD(real_AB) self.loss_D_real = self.criterionGAN(pred_real, True) self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5 self.loss_D.backward() def backward_G(self): fake_AB = torch.cat((self.real_A, self.fake_B), 1) pred_fake = self.netD(fake_AB) self.loss_G_GAN = self.criterionGAN(pred_fake, True) self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1 self.loss_G = self.loss_G_L1 + self.loss_G_GAN self.loss_G.backward() def optimize_parameters(self): self.forward() self.set_requires_grad(self.netD, True) self.optimizer_D.zero_grad() self.backward_D() self.optimizer_D.step() self.set_requires_grad(self.netD, False) self.optimizer_G.zero_grad() self.backward_G() self.optimizer_G.step() # File: controlnet_aux-master/src/controlnet_aux/leres/pix2pix/options/base_options.py import argparse import os from ...pix2pix.util import util from ...pix2pix import models import numpy as np class BaseOptions: def __init__(self): self.initialized = False def initialize(self, parser): parser.add_argument('--dataroot', help='path to images (should have subfolders trainA, trainB, valA, valB, etc)') parser.add_argument('--name', type=str, default='void', help='mahdi_unet_new, scaled_unet') parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') parser.add_argument('--checkpoints_dir', type=str, default='./pix2pix/checkpoints', help='models are saved here') parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]') parser.add_argument('--input_nc', type=int, default=2, help='# of input image channels: 3 for RGB and 1 for grayscale') parser.add_argument('--output_nc', type=int, default=1, help='# of output image channels: 3 for RGB and 1 for grayscale') parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer') parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer') parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator') parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]') parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers') parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]') parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]') parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator') parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]') parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA') parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data') parser.add_argument('--batch_size', type=int, default=1, help='input batch size') parser.add_argument('--load_size', type=int, default=672, help='scale images to this size') parser.add_argument('--crop_size', type=int, default=672, help='then crop to this size') parser.add_argument('--max_dataset_size', type=int, default=10000, help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.') parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]') parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation') parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML') parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]') parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information') parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}') parser.add_argument('--data_dir', type=str, required=False, help='input files directory images can be .png .jpg .tiff') parser.add_argument('--output_dir', type=str, required=False, help='result dir. result depth will be png. vides are JMPG as avi') parser.add_argument('--savecrops', type=int, required=False) parser.add_argument('--savewholeest', type=int, required=False) parser.add_argument('--output_resolution', type=int, required=False, help='0 for no restriction 1 for resize to input size') parser.add_argument('--net_receptive_field_size', type=int, required=False) parser.add_argument('--pix2pixsize', type=int, required=False) parser.add_argument('--generatevideo', type=int, required=False) parser.add_argument('--depthNet', type=int, required=False, help='0: midas 1:strurturedRL') parser.add_argument('--R0', action='store_true') parser.add_argument('--R20', action='store_true') parser.add_argument('--Final', action='store_true') parser.add_argument('--colorize_results', action='store_true') parser.add_argument('--max_res', type=float, default=np.inf) self.initialized = True return parser def gather_options(self): if not self.initialized: parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser = self.initialize(parser) (opt, _) = parser.parse_known_args() model_name = opt.model model_option_setter = models.get_option_setter(model_name) parser = model_option_setter(parser, self.isTrain) (opt, _) = parser.parse_known_args() self.parser = parser return opt def print_options(self, opt): message = '' message += '----------------- Options ---------------\n' for (k, v) in sorted(vars(opt).items()): comment = '' default = self.parser.get_default(k) if v != default: comment = '\t[default: %s]' % str(default) message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment) message += '----------------- End -------------------' print(message) expr_dir = os.path.join(opt.checkpoints_dir, opt.name) util.mkdirs(expr_dir) file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase)) with open(file_name, 'wt') as opt_file: opt_file.write(message) opt_file.write('\n') def parse(self): opt = self.gather_options() opt.isTrain = self.isTrain if opt.suffix: suffix = '_' + opt.suffix.format(**vars(opt)) if opt.suffix != '' else '' opt.name = opt.name + suffix str_ids = opt.gpu_ids.split(',') opt.gpu_ids = [] for str_id in str_ids: id = int(str_id) if id >= 0: opt.gpu_ids.append(id) self.opt = opt return self.opt # File: controlnet_aux-master/src/controlnet_aux/leres/pix2pix/util/util.py """""" from __future__ import print_function import torch import numpy as np from PIL import Image import os def tensor2im(input_image, imtype=np.uint16): if not isinstance(input_image, np.ndarray): if isinstance(input_image, torch.Tensor): image_tensor = input_image.data else: return input_image image_numpy = torch.squeeze(image_tensor).cpu().numpy() image_numpy = (image_numpy + 1) / 2.0 * (2 ** 16 - 1) else: image_numpy = input_image return image_numpy.astype(imtype) def diagnose_network(net, name='network'): mean = 0.0 count = 0 for param in net.parameters(): if param.grad is not None: mean += torch.mean(torch.abs(param.grad.data)) count += 1 if count > 0: mean = mean / count print(name) print(mean) def save_image(image_numpy, image_path, aspect_ratio=1.0): image_pil = Image.fromarray(image_numpy) image_pil = image_pil.convert('I;16') image_pil.save(image_path) def print_numpy(x, val=True, shp=False): x = x.astype(np.float64) if shp: print('shape,', x.shape) if val: x = x.flatten() print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x))) def mkdirs(paths): if isinstance(paths, list) and (not isinstance(paths, str)): for path in paths: mkdir(path) else: mkdir(paths) def mkdir(path): if not os.path.exists(path): os.makedirs(path) # File: controlnet_aux-master/src/controlnet_aux/lineart/__init__.py import os import warnings import cv2 import numpy as np import torch import torch.nn as nn from einops import rearrange from huggingface_hub import hf_hub_download from PIL import Image from ..util import HWC3, resize_image norm_layer = nn.InstanceNorm2d class ResidualBlock(nn.Module): def __init__(self, in_features): super(ResidualBlock, self).__init__() conv_block = [nn.ReflectionPad2d(1), nn.Conv2d(in_features, in_features, 3), norm_layer(in_features), nn.ReLU(inplace=True), nn.ReflectionPad2d(1), nn.Conv2d(in_features, in_features, 3), norm_layer(in_features)] self.conv_block = nn.Sequential(*conv_block) def forward(self, x): return x + self.conv_block(x) class Generator(nn.Module): def __init__(self, input_nc, output_nc, n_residual_blocks=9, sigmoid=True): super(Generator, self).__init__() model0 = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, 64, 7), norm_layer(64), nn.ReLU(inplace=True)] self.model0 = nn.Sequential(*model0) model1 = [] in_features = 64 out_features = in_features * 2 for _ in range(2): model1 += [nn.Conv2d(in_features, out_features, 3, stride=2, padding=1), norm_layer(out_features), nn.ReLU(inplace=True)] in_features = out_features out_features = in_features * 2 self.model1 = nn.Sequential(*model1) model2 = [] for _ in range(n_residual_blocks): model2 += [ResidualBlock(in_features)] self.model2 = nn.Sequential(*model2) model3 = [] out_features = in_features // 2 for _ in range(2): model3 += [nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1), norm_layer(out_features), nn.ReLU(inplace=True)] in_features = out_features out_features = in_features // 2 self.model3 = nn.Sequential(*model3) model4 = [nn.ReflectionPad2d(3), nn.Conv2d(64, output_nc, 7)] if sigmoid: model4 += [nn.Sigmoid()] self.model4 = nn.Sequential(*model4) def forward(self, x, cond=None): out = self.model0(x) out = self.model1(out) out = self.model2(out) out = self.model3(out) out = self.model4(out) return out class LineartDetector: def __init__(self, model, coarse_model): self.model = model self.model_coarse = coarse_model @classmethod def from_pretrained(cls, pretrained_model_or_path, filename=None, coarse_filename=None, cache_dir=None, local_files_only=False): filename = filename or 'sk_model.pth' coarse_filename = coarse_filename or 'sk_model2.pth' if os.path.isdir(pretrained_model_or_path): model_path = os.path.join(pretrained_model_or_path, filename) coarse_model_path = os.path.join(pretrained_model_or_path, coarse_filename) else: model_path = hf_hub_download(pretrained_model_or_path, filename, cache_dir=cache_dir, local_files_only=local_files_only) coarse_model_path = hf_hub_download(pretrained_model_or_path, coarse_filename, cache_dir=cache_dir, local_files_only=local_files_only) model = Generator(3, 1, 3) model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu'))) model.eval() coarse_model = Generator(3, 1, 3) coarse_model.load_state_dict(torch.load(coarse_model_path, map_location=torch.device('cpu'))) coarse_model.eval() return cls(model, coarse_model) def to(self, device): self.model.to(device) self.model_coarse.to(device) return self def __call__(self, input_image, coarse=False, detect_resolution=512, image_resolution=512, output_type='pil', **kwargs): if 'return_pil' in kwargs: warnings.warn('return_pil is deprecated. Use output_type instead.', DeprecationWarning) output_type = 'pil' if kwargs['return_pil'] else 'np' if type(output_type) is bool: warnings.warn('Passing `True` or `False` to `output_type` is deprecated and will raise an error in future versions') if output_type: output_type = 'pil' device = next(iter(self.model.parameters())).device if not isinstance(input_image, np.ndarray): input_image = np.array(input_image, dtype=np.uint8) input_image = HWC3(input_image) input_image = resize_image(input_image, detect_resolution) model = self.model_coarse if coarse else self.model assert input_image.ndim == 3 image = input_image with torch.no_grad(): image = torch.from_numpy(image).float().to(device) image = image / 255.0 image = rearrange(image, 'h w c -> 1 c h w') line = model(image)[0][0] line = line.cpu().numpy() line = (line * 255.0).clip(0, 255).astype(np.uint8) detected_map = line detected_map = HWC3(detected_map) img = resize_image(input_image, image_resolution) (H, W, C) = img.shape detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR) detected_map = 255 - detected_map if output_type == 'pil': detected_map = Image.fromarray(detected_map) return detected_map # File: controlnet_aux-master/src/controlnet_aux/lineart_anime/__init__.py import functools import os import warnings import cv2 import numpy as np import torch import torch.nn as nn from einops import rearrange from huggingface_hub import hf_hub_download from PIL import Image from ..util import HWC3, resize_image class UnetGenerator(nn.Module): def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False): super(UnetGenerator, self).__init__() unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) for _ in range(num_downs - 5): unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout) unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer) unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer) unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer) self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) def forward(self, input): return self.model(input) class UnetSkipConnectionBlock(nn.Module): def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False): super(UnetSkipConnectionBlock, self).__init__() self.outermost = outermost if type(norm_layer) == functools.partial: use_bias = norm_layer.func == nn.InstanceNorm2d else: use_bias = norm_layer == nn.InstanceNorm2d if input_nc is None: input_nc = outer_nc downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias) downrelu = nn.LeakyReLU(0.2, True) downnorm = norm_layer(inner_nc) uprelu = nn.ReLU(True) upnorm = norm_layer(outer_nc) if outermost: upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1) down = [downconv] up = [uprelu, upconv, nn.Tanh()] model = down + [submodule] + up elif innermost: upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias) down = [downrelu, downconv] up = [uprelu, upconv, upnorm] model = down + up else: upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias) down = [downrelu, downconv, downnorm] up = [uprelu, upconv, upnorm] if use_dropout: model = down + [submodule] + up + [nn.Dropout(0.5)] else: model = down + [submodule] + up self.model = nn.Sequential(*model) def forward(self, x): if self.outermost: return self.model(x) else: return torch.cat([x, self.model(x)], 1) class LineartAnimeDetector: def __init__(self, model): self.model = model @classmethod def from_pretrained(cls, pretrained_model_or_path, filename=None, cache_dir=None, local_files_only=False): filename = filename or 'netG.pth' if os.path.isdir(pretrained_model_or_path): model_path = os.path.join(pretrained_model_or_path, filename) else: model_path = hf_hub_download(pretrained_model_or_path, filename, cache_dir=cache_dir, local_files_only=local_files_only) norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False) net = UnetGenerator(3, 1, 8, 64, norm_layer=norm_layer, use_dropout=False) ckpt = torch.load(model_path) for key in list(ckpt.keys()): if 'module.' in key: ckpt[key.replace('module.', '')] = ckpt[key] del ckpt[key] net.load_state_dict(ckpt) net.eval() return cls(net) def to(self, device): self.model.to(device) return self def __call__(self, input_image, detect_resolution=512, image_resolution=512, output_type='pil', **kwargs): if 'return_pil' in kwargs: warnings.warn('return_pil is deprecated. Use output_type instead.', DeprecationWarning) output_type = 'pil' if kwargs['return_pil'] else 'np' if type(output_type) is bool: warnings.warn('Passing `True` or `False` to `output_type` is deprecated and will raise an error in future versions') if output_type: output_type = 'pil' device = next(iter(self.model.parameters())).device if not isinstance(input_image, np.ndarray): input_image = np.array(input_image, dtype=np.uint8) input_image = HWC3(input_image) input_image = resize_image(input_image, detect_resolution) (H, W, C) = input_image.shape Hn = 256 * int(np.ceil(float(H) / 256.0)) Wn = 256 * int(np.ceil(float(W) / 256.0)) img = cv2.resize(input_image, (Wn, Hn), interpolation=cv2.INTER_CUBIC) with torch.no_grad(): image_feed = torch.from_numpy(img).float().to(device) image_feed = image_feed / 127.5 - 1.0 image_feed = rearrange(image_feed, 'h w c -> 1 c h w') line = self.model(image_feed)[0, 0] * 127.5 + 127.5 line = line.cpu().numpy() line = cv2.resize(line, (W, H), interpolation=cv2.INTER_CUBIC) line = line.clip(0, 255).astype(np.uint8) detected_map = line detected_map = HWC3(detected_map) img = resize_image(input_image, image_resolution) (H, W, C) = img.shape detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR) detected_map = 255 - detected_map if output_type == 'pil': detected_map = Image.fromarray(detected_map) return detected_map # File: controlnet_aux-master/src/controlnet_aux/lineart_standard/__init__.py import cv2 import numpy as np from PIL import Image from ..util import HWC3, resize_image class LineartStandardDetector: def __call__(self, input_image=None, guassian_sigma=6.0, intensity_threshold=8, detect_resolution=512, output_type='pil'): if not isinstance(input_image, np.ndarray): input_image = np.array(input_image, dtype=np.uint8) else: output_type = output_type or 'np' (original_height, original_width, _) = input_image.shape input_image = HWC3(input_image) input_image = resize_image(input_image, detect_resolution) x = input_image.astype(np.float32) g = cv2.GaussianBlur(x, (0, 0), guassian_sigma) intensity = np.min(g - x, axis=2).clip(0, 255) intensity /= max(16, np.median(intensity[intensity > intensity_threshold])) intensity *= 127 detected_map = intensity.clip(0, 255).astype(np.uint8) detected_map = HWC3(detected_map) detected_map = cv2.resize(detected_map, (original_width, original_height), interpolation=cv2.INTER_CUBIC) if output_type == 'pil': detected_map = Image.fromarray(detected_map) return detected_map # File: controlnet_aux-master/src/controlnet_aux/mediapipe_face/__init__.py import warnings from typing import Union import cv2 import numpy as np from PIL import Image from ..util import HWC3, resize_image from .mediapipe_face_common import generate_annotation class MediapipeFaceDetector: def __call__(self, input_image: Union[np.ndarray, Image.Image]=None, max_faces: int=1, min_confidence: float=0.5, output_type: str='pil', detect_resolution: int=512, image_resolution: int=512, **kwargs): if 'image' in kwargs: warnings.warn('image is deprecated, please use `input_image=...` instead.', DeprecationWarning) input_image = kwargs.pop('image') if input_image is None: raise ValueError('input_image must be defined.') if 'return_pil' in kwargs: warnings.warn('return_pil is deprecated. Use output_type instead.', DeprecationWarning) output_type = 'pil' if kwargs['return_pil'] else 'np' if type(output_type) is bool: warnings.warn('Passing `True` or `False` to `output_type` is deprecated and will raise an error in future versions') if output_type: output_type = 'pil' if not isinstance(input_image, np.ndarray): input_image = np.array(input_image, dtype=np.uint8) input_image = HWC3(input_image) input_image = resize_image(input_image, detect_resolution) detected_map = generate_annotation(input_image, max_faces, min_confidence) detected_map = HWC3(detected_map) img = resize_image(input_image, image_resolution) (H, W, C) = img.shape detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR) if output_type == 'pil': detected_map = Image.fromarray(detected_map) return detected_map # File: controlnet_aux-master/src/controlnet_aux/mediapipe_face/mediapipe_face_common.py from typing import Mapping import warnings try: import mediapipe as mp except ImportError: warnings.warn("The module 'mediapipe' is not installed. The package will have limited functionality. Please install it using the command: pip install 'mediapipe'") mp = None import numpy if mp: mp_drawing = mp.solutions.drawing_utils mp_drawing_styles = mp.solutions.drawing_styles mp_face_detection = mp.solutions.face_detection mp_face_mesh = mp.solutions.face_mesh mp_face_connections = mp.solutions.face_mesh_connections.FACEMESH_TESSELATION mp_hand_connections = mp.solutions.hands_connections.HAND_CONNECTIONS mp_body_connections = mp.solutions.pose_connections.POSE_CONNECTIONS DrawingSpec = mp.solutions.drawing_styles.DrawingSpec PoseLandmark = mp.solutions.drawing_styles.PoseLandmark min_face_size_pixels: int = 64 f_thick = 2 f_rad = 1 right_iris_draw = DrawingSpec(color=(10, 200, 250), thickness=f_thick, circle_radius=f_rad) right_eye_draw = DrawingSpec(color=(10, 200, 180), thickness=f_thick, circle_radius=f_rad) right_eyebrow_draw = DrawingSpec(color=(10, 220, 180), thickness=f_thick, circle_radius=f_rad) left_iris_draw = DrawingSpec(color=(250, 200, 10), thickness=f_thick, circle_radius=f_rad) left_eye_draw = DrawingSpec(color=(180, 200, 10), thickness=f_thick, circle_radius=f_rad) left_eyebrow_draw = DrawingSpec(color=(180, 220, 10), thickness=f_thick, circle_radius=f_rad) mouth_draw = DrawingSpec(color=(10, 180, 10), thickness=f_thick, circle_radius=f_rad) head_draw = DrawingSpec(color=(10, 200, 10), thickness=f_thick, circle_radius=f_rad) face_connection_spec = {} for edge in mp_face_mesh.FACEMESH_FACE_OVAL: face_connection_spec[edge] = head_draw for edge in mp_face_mesh.FACEMESH_LEFT_EYE: face_connection_spec[edge] = left_eye_draw for edge in mp_face_mesh.FACEMESH_LEFT_EYEBROW: face_connection_spec[edge] = left_eyebrow_draw for edge in mp_face_mesh.FACEMESH_RIGHT_EYE: face_connection_spec[edge] = right_eye_draw for edge in mp_face_mesh.FACEMESH_RIGHT_EYEBROW: face_connection_spec[edge] = right_eyebrow_draw for edge in mp_face_mesh.FACEMESH_LIPS: face_connection_spec[edge] = mouth_draw iris_landmark_spec = {468: right_iris_draw, 473: left_iris_draw} def draw_pupils(image, landmark_list, drawing_spec, halfwidth: int=2): if len(image.shape) != 3: raise ValueError('Input image must be H,W,C.') (image_rows, image_cols, image_channels) = image.shape if image_channels != 3: raise ValueError('Input image must contain three channel bgr data.') for (idx, landmark) in enumerate(landmark_list.landmark): if landmark.HasField('visibility') and landmark.visibility < 0.9 or (landmark.HasField('presence') and landmark.presence < 0.5): continue if landmark.x >= 1.0 or landmark.x < 0 or landmark.y >= 1.0 or (landmark.y < 0): continue image_x = int(image_cols * landmark.x) image_y = int(image_rows * landmark.y) draw_color = None if isinstance(drawing_spec, Mapping): if drawing_spec.get(idx) is None: continue else: draw_color = drawing_spec[idx].color elif isinstance(drawing_spec, DrawingSpec): draw_color = drawing_spec.color image[image_y - halfwidth:image_y + halfwidth, image_x - halfwidth:image_x + halfwidth, :] = draw_color def reverse_channels(image): return image[:, :, ::-1] def generate_annotation(img_rgb, max_faces: int, min_confidence: float): with mp_face_mesh.FaceMesh(static_image_mode=True, max_num_faces=max_faces, refine_landmarks=True, min_detection_confidence=min_confidence) as facemesh: (img_height, img_width, img_channels) = img_rgb.shape assert img_channels == 3 results = facemesh.process(img_rgb).multi_face_landmarks if results is None: print('No faces detected in controlnet image for Mediapipe face annotator.') return numpy.zeros_like(img_rgb) filtered_landmarks = [] for lm in results: landmarks = lm.landmark face_rect = [landmarks[0].x, landmarks[0].y, landmarks[0].x, landmarks[0].y] for i in range(len(landmarks)): face_rect[0] = min(face_rect[0], landmarks[i].x) face_rect[1] = min(face_rect[1], landmarks[i].y) face_rect[2] = max(face_rect[2], landmarks[i].x) face_rect[3] = max(face_rect[3], landmarks[i].y) if min_face_size_pixels > 0: face_width = abs(face_rect[2] - face_rect[0]) face_height = abs(face_rect[3] - face_rect[1]) face_width_pixels = face_width * img_width face_height_pixels = face_height * img_height face_size = min(face_width_pixels, face_height_pixels) if face_size >= min_face_size_pixels: filtered_landmarks.append(lm) else: filtered_landmarks.append(lm) empty = numpy.zeros_like(img_rgb) for face_landmarks in filtered_landmarks: mp_drawing.draw_landmarks(empty, face_landmarks, connections=face_connection_spec.keys(), landmark_drawing_spec=None, connection_drawing_spec=face_connection_spec) draw_pupils(empty, face_landmarks, iris_landmark_spec, 2) empty = reverse_channels(empty).copy() return empty # File: controlnet_aux-master/src/controlnet_aux/midas/__init__.py import os import cv2 import numpy as np import torch from einops import rearrange from huggingface_hub import hf_hub_download from PIL import Image from ..util import HWC3, resize_image from .api import MiDaSInference class MidasDetector: def __init__(self, model): self.model = model @classmethod def from_pretrained(cls, pretrained_model_or_path, model_type='dpt_hybrid', filename=None, cache_dir=None, local_files_only=False): if pretrained_model_or_path == 'lllyasviel/ControlNet': filename = filename or 'annotator/ckpts/dpt_hybrid-midas-501f0c75.pt' else: filename = filename or 'dpt_hybrid-midas-501f0c75.pt' if os.path.isdir(pretrained_model_or_path): model_path = os.path.join(pretrained_model_or_path, filename) else: model_path = hf_hub_download(pretrained_model_or_path, filename, cache_dir=cache_dir, local_files_only=local_files_only) model = MiDaSInference(model_type=model_type, model_path=model_path) return cls(model) def to(self, device): self.model.to(device) return self def __call__(self, input_image, a=np.pi * 2.0, bg_th=0.1, depth_and_normal=False, detect_resolution=512, image_resolution=512, output_type=None): device = next(iter(self.model.parameters())).device if not isinstance(input_image, np.ndarray): input_image = np.array(input_image, dtype=np.uint8) output_type = output_type or 'pil' else: output_type = output_type or 'np' input_image = HWC3(input_image) input_image = resize_image(input_image, detect_resolution) assert input_image.ndim == 3 image_depth = input_image with torch.no_grad(): image_depth = torch.from_numpy(image_depth).float() image_depth = image_depth.to(device) image_depth = image_depth / 127.5 - 1.0 image_depth = rearrange(image_depth, 'h w c -> 1 c h w') depth = self.model(image_depth)[0] depth_pt = depth.clone() depth_pt -= torch.min(depth_pt) depth_pt /= torch.max(depth_pt) depth_pt = depth_pt.cpu().numpy() depth_image = (depth_pt * 255.0).clip(0, 255).astype(np.uint8) if depth_and_normal: depth_np = depth.cpu().numpy() x = cv2.Sobel(depth_np, cv2.CV_32F, 1, 0, ksize=3) y = cv2.Sobel(depth_np, cv2.CV_32F, 0, 1, ksize=3) z = np.ones_like(x) * a x[depth_pt < bg_th] = 0 y[depth_pt < bg_th] = 0 normal = np.stack([x, y, z], axis=2) normal /= np.sum(normal ** 2.0, axis=2, keepdims=True) ** 0.5 normal_image = (normal * 127.5 + 127.5).clip(0, 255).astype(np.uint8)[:, :, ::-1] depth_image = HWC3(depth_image) if depth_and_normal: normal_image = HWC3(normal_image) img = resize_image(input_image, image_resolution) (H, W, C) = img.shape depth_image = cv2.resize(depth_image, (W, H), interpolation=cv2.INTER_LINEAR) if depth_and_normal: normal_image = cv2.resize(normal_image, (W, H), interpolation=cv2.INTER_LINEAR) if output_type == 'pil': depth_image = Image.fromarray(depth_image) if depth_and_normal: normal_image = Image.fromarray(normal_image) if depth_and_normal: return (depth_image, normal_image) else: return depth_image # File: controlnet_aux-master/src/controlnet_aux/midas/api.py import cv2 import os import torch import torch.nn as nn from torchvision.transforms import Compose from .midas.dpt_depth import DPTDepthModel from .midas.midas_net import MidasNet from .midas.midas_net_custom import MidasNet_small from .midas.transforms import Resize, NormalizeImage, PrepareForNet from ..util import annotator_ckpts_path ISL_PATHS = {'dpt_large': os.path.join(annotator_ckpts_path, 'dpt_large-midas-2f21e586.pt'), 'dpt_hybrid': os.path.join(annotator_ckpts_path, 'dpt_hybrid-midas-501f0c75.pt'), 'midas_v21': '', 'midas_v21_small': ''} remote_model_path = 'https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/dpt_hybrid-midas-501f0c75.pt' def disabled_train(self, mode=True): return self def load_midas_transform(model_type): if model_type == 'dpt_large': (net_w, net_h) = (384, 384) resize_mode = 'minimal' normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == 'dpt_hybrid': (net_w, net_h) = (384, 384) resize_mode = 'minimal' normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == 'midas_v21': (net_w, net_h) = (384, 384) resize_mode = 'upper_bound' normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) elif model_type == 'midas_v21_small': (net_w, net_h) = (256, 256) resize_mode = 'upper_bound' normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) else: assert False, f"model_type '{model_type}' not implemented, use: --model_type large" transform = Compose([Resize(net_w, net_h, resize_target=None, keep_aspect_ratio=True, ensure_multiple_of=32, resize_method=resize_mode, image_interpolation_method=cv2.INTER_CUBIC), normalization, PrepareForNet()]) return transform def load_model(model_type, model_path=None): model_path = model_path or ISL_PATHS[model_type] if model_type == 'dpt_large': model = DPTDepthModel(path=model_path, backbone='vitl16_384', non_negative=True) (net_w, net_h) = (384, 384) resize_mode = 'minimal' normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == 'dpt_hybrid': if not os.path.exists(model_path): from basicsr.utils.download_util import load_file_from_url load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path) model = DPTDepthModel(path=model_path, backbone='vitb_rn50_384', non_negative=True) (net_w, net_h) = (384, 384) resize_mode = 'minimal' normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == 'midas_v21': model = MidasNet(model_path, non_negative=True) (net_w, net_h) = (384, 384) resize_mode = 'upper_bound' normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) elif model_type == 'midas_v21_small': model = MidasNet_small(model_path, features=64, backbone='efficientnet_lite3', exportable=True, non_negative=True, blocks={'expand': True}) (net_w, net_h) = (256, 256) resize_mode = 'upper_bound' normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) else: print(f"model_type '{model_type}' not implemented, use: --model_type large") assert False transform = Compose([Resize(net_w, net_h, resize_target=None, keep_aspect_ratio=True, ensure_multiple_of=32, resize_method=resize_mode, image_interpolation_method=cv2.INTER_CUBIC), normalization, PrepareForNet()]) return (model.eval(), transform) class MiDaSInference(nn.Module): MODEL_TYPES_TORCH_HUB = ['DPT_Large', 'DPT_Hybrid', 'MiDaS_small'] MODEL_TYPES_ISL = ['dpt_large', 'dpt_hybrid', 'midas_v21', 'midas_v21_small'] def __init__(self, model_type, model_path): super().__init__() assert model_type in self.MODEL_TYPES_ISL (model, _) = load_model(model_type, model_path) self.model = model self.model.train = disabled_train def forward(self, x): with torch.no_grad(): prediction = self.model(x) return prediction # File: controlnet_aux-master/src/controlnet_aux/midas/midas/base_model.py import torch class BaseModel(torch.nn.Module): def load(self, path): parameters = torch.load(path, map_location=torch.device('cpu')) if 'optimizer' in parameters: parameters = parameters['model'] self.load_state_dict(parameters) # File: controlnet_aux-master/src/controlnet_aux/midas/midas/blocks.py import torch import torch.nn as nn from .vit import _make_pretrained_vitb_rn50_384, _make_pretrained_vitl16_384, _make_pretrained_vitb16_384, forward_vit def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout='ignore'): if backbone == 'vitl16_384': pretrained = _make_pretrained_vitl16_384(use_pretrained, hooks=hooks, use_readout=use_readout) scratch = _make_scratch([256, 512, 1024, 1024], features, groups=groups, expand=expand) elif backbone == 'vitb_rn50_384': pretrained = _make_pretrained_vitb_rn50_384(use_pretrained, hooks=hooks, use_vit_only=use_vit_only, use_readout=use_readout) scratch = _make_scratch([256, 512, 768, 768], features, groups=groups, expand=expand) elif backbone == 'vitb16_384': pretrained = _make_pretrained_vitb16_384(use_pretrained, hooks=hooks, use_readout=use_readout) scratch = _make_scratch([96, 192, 384, 768], features, groups=groups, expand=expand) elif backbone == 'resnext101_wsl': pretrained = _make_pretrained_resnext101_wsl(use_pretrained) scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand) elif backbone == 'efficientnet_lite3': pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable) scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand) else: print(f"Backbone '{backbone}' not implemented") assert False return (pretrained, scratch) def _make_scratch(in_shape, out_shape, groups=1, expand=False): scratch = nn.Module() out_shape1 = out_shape out_shape2 = out_shape out_shape3 = out_shape out_shape4 = out_shape if expand == True: out_shape1 = out_shape out_shape2 = out_shape * 2 out_shape3 = out_shape * 4 out_shape4 = out_shape * 8 scratch.layer1_rn = nn.Conv2d(in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups) scratch.layer2_rn = nn.Conv2d(in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups) scratch.layer3_rn = nn.Conv2d(in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups) scratch.layer4_rn = nn.Conv2d(in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups) return scratch def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False): efficientnet = torch.hub.load('rwightman/gen-efficientnet-pytorch', 'tf_efficientnet_lite3', pretrained=use_pretrained, exportable=exportable) return _make_efficientnet_backbone(efficientnet) def _make_efficientnet_backbone(effnet): pretrained = nn.Module() pretrained.layer1 = nn.Sequential(effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2]) pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3]) pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5]) pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9]) return pretrained def _make_resnet_backbone(resnet): pretrained = nn.Module() pretrained.layer1 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1) pretrained.layer2 = resnet.layer2 pretrained.layer3 = resnet.layer3 pretrained.layer4 = resnet.layer4 return pretrained def _make_pretrained_resnext101_wsl(use_pretrained): resnet = torch.hub.load('facebookresearch/WSL-Images', 'resnext101_32x8d_wsl') return _make_resnet_backbone(resnet) class Interpolate(nn.Module): def __init__(self, scale_factor, mode, align_corners=False): super(Interpolate, self).__init__() self.interp = nn.functional.interpolate self.scale_factor = scale_factor self.mode = mode self.align_corners = align_corners def forward(self, x): x = self.interp(x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners) return x class ResidualConvUnit(nn.Module): def __init__(self, features): super().__init__() self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True) self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True) self.relu = nn.ReLU(inplace=True) def forward(self, x): out = self.relu(x) out = self.conv1(out) out = self.relu(out) out = self.conv2(out) return out + x class FeatureFusionBlock(nn.Module): def __init__(self, features): super(FeatureFusionBlock, self).__init__() self.resConfUnit1 = ResidualConvUnit(features) self.resConfUnit2 = ResidualConvUnit(features) def forward(self, *xs): output = xs[0] if len(xs) == 2: output += self.resConfUnit1(xs[1]) output = self.resConfUnit2(output) output = nn.functional.interpolate(output, scale_factor=2, mode='bilinear', align_corners=True) return output class ResidualConvUnit_custom(nn.Module): def __init__(self, features, activation, bn): super().__init__() self.bn = bn self.groups = 1 self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups) self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups) if self.bn == True: self.bn1 = nn.BatchNorm2d(features) self.bn2 = nn.BatchNorm2d(features) self.activation = activation self.skip_add = nn.quantized.FloatFunctional() def forward(self, x): out = self.activation(x) out = self.conv1(out) if self.bn == True: out = self.bn1(out) out = self.activation(out) out = self.conv2(out) if self.bn == True: out = self.bn2(out) if self.groups > 1: out = self.conv_merge(out) return self.skip_add.add(out, x) class FeatureFusionBlock_custom(nn.Module): def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True): super(FeatureFusionBlock_custom, self).__init__() self.deconv = deconv self.align_corners = align_corners self.groups = 1 self.expand = expand out_features = features if self.expand == True: out_features = features // 2 self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1) self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn) self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn) self.skip_add = nn.quantized.FloatFunctional() def forward(self, *xs): output = xs[0] if len(xs) == 2: res = self.resConfUnit1(xs[1]) output = self.skip_add.add(output, res) output = self.resConfUnit2(output) output = nn.functional.interpolate(output, scale_factor=2, mode='bilinear', align_corners=self.align_corners) output = self.out_conv(output) return output # File: controlnet_aux-master/src/controlnet_aux/midas/midas/dpt_depth.py import torch import torch.nn as nn import torch.nn.functional as F from .base_model import BaseModel from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder, forward_vit def _make_fusion_block(features, use_bn): return FeatureFusionBlock_custom(features, nn.ReLU(False), deconv=False, bn=use_bn, expand=False, align_corners=True) class DPT(BaseModel): def __init__(self, head, features=256, backbone='vitb_rn50_384', readout='project', channels_last=False, use_bn=False): super(DPT, self).__init__() self.channels_last = channels_last hooks = {'vitb_rn50_384': [0, 1, 8, 11], 'vitb16_384': [2, 5, 8, 11], 'vitl16_384': [5, 11, 17, 23]} (self.pretrained, self.scratch) = _make_encoder(backbone, features, False, groups=1, expand=False, exportable=False, hooks=hooks[backbone], use_readout=readout) self.scratch.refinenet1 = _make_fusion_block(features, use_bn) self.scratch.refinenet2 = _make_fusion_block(features, use_bn) self.scratch.refinenet3 = _make_fusion_block(features, use_bn) self.scratch.refinenet4 = _make_fusion_block(features, use_bn) self.scratch.output_conv = head def forward(self, x): if self.channels_last == True: x.contiguous(memory_format=torch.channels_last) (layer_1, layer_2, layer_3, layer_4) = forward_vit(self.pretrained, x) layer_1_rn = self.scratch.layer1_rn(layer_1) layer_2_rn = self.scratch.layer2_rn(layer_2) layer_3_rn = self.scratch.layer3_rn(layer_3) layer_4_rn = self.scratch.layer4_rn(layer_4) path_4 = self.scratch.refinenet4(layer_4_rn) path_3 = self.scratch.refinenet3(path_4, layer_3_rn) path_2 = self.scratch.refinenet2(path_3, layer_2_rn) path_1 = self.scratch.refinenet1(path_2, layer_1_rn) out = self.scratch.output_conv(path_1) return out class DPTDepthModel(DPT): def __init__(self, path=None, non_negative=True, **kwargs): features = kwargs['features'] if 'features' in kwargs else 256 head = nn.Sequential(nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1), Interpolate(scale_factor=2, mode='bilinear', align_corners=True), nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1), nn.ReLU(True), nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), nn.ReLU(True) if non_negative else nn.Identity(), nn.Identity()) super().__init__(head, **kwargs) if path is not None: self.load(path) def forward(self, x): return super().forward(x).squeeze(dim=1) # File: controlnet_aux-master/src/controlnet_aux/midas/midas/midas_net.py """""" import torch import torch.nn as nn from .base_model import BaseModel from .blocks import FeatureFusionBlock, Interpolate, _make_encoder class MidasNet(BaseModel): def __init__(self, path=None, features=256, non_negative=True): print('Loading weights: ', path) super(MidasNet, self).__init__() use_pretrained = False if path is None else True (self.pretrained, self.scratch) = _make_encoder(backbone='resnext101_wsl', features=features, use_pretrained=use_pretrained) self.scratch.refinenet4 = FeatureFusionBlock(features) self.scratch.refinenet3 = FeatureFusionBlock(features) self.scratch.refinenet2 = FeatureFusionBlock(features) self.scratch.refinenet1 = FeatureFusionBlock(features) self.scratch.output_conv = nn.Sequential(nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1), Interpolate(scale_factor=2, mode='bilinear'), nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1), nn.ReLU(True), nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), nn.ReLU(True) if non_negative else nn.Identity()) if path: self.load(path) def forward(self, x): layer_1 = self.pretrained.layer1(x) layer_2 = self.pretrained.layer2(layer_1) layer_3 = self.pretrained.layer3(layer_2) layer_4 = self.pretrained.layer4(layer_3) layer_1_rn = self.scratch.layer1_rn(layer_1) layer_2_rn = self.scratch.layer2_rn(layer_2) layer_3_rn = self.scratch.layer3_rn(layer_3) layer_4_rn = self.scratch.layer4_rn(layer_4) path_4 = self.scratch.refinenet4(layer_4_rn) path_3 = self.scratch.refinenet3(path_4, layer_3_rn) path_2 = self.scratch.refinenet2(path_3, layer_2_rn) path_1 = self.scratch.refinenet1(path_2, layer_1_rn) out = self.scratch.output_conv(path_1) return torch.squeeze(out, dim=1) # File: controlnet_aux-master/src/controlnet_aux/midas/midas/midas_net_custom.py """""" import torch import torch.nn as nn from .base_model import BaseModel from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder class MidasNet_small(BaseModel): def __init__(self, path=None, features=64, backbone='efficientnet_lite3', non_negative=True, exportable=True, channels_last=False, align_corners=True, blocks={'expand': True}): print('Loading weights: ', path) super(MidasNet_small, self).__init__() use_pretrained = False if path else True self.channels_last = channels_last self.blocks = blocks self.backbone = backbone self.groups = 1 features1 = features features2 = features features3 = features features4 = features self.expand = False if 'expand' in self.blocks and self.blocks['expand'] == True: self.expand = True features1 = features features2 = features * 2 features3 = features * 4 features4 = features * 8 (self.pretrained, self.scratch) = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable) self.scratch.activation = nn.ReLU(False) self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners) self.scratch.output_conv = nn.Sequential(nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1, groups=self.groups), Interpolate(scale_factor=2, mode='bilinear'), nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1), self.scratch.activation, nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), nn.ReLU(True) if non_negative else nn.Identity(), nn.Identity()) if path: self.load(path) def forward(self, x): if self.channels_last == True: print('self.channels_last = ', self.channels_last) x.contiguous(memory_format=torch.channels_last) layer_1 = self.pretrained.layer1(x) layer_2 = self.pretrained.layer2(layer_1) layer_3 = self.pretrained.layer3(layer_2) layer_4 = self.pretrained.layer4(layer_3) layer_1_rn = self.scratch.layer1_rn(layer_1) layer_2_rn = self.scratch.layer2_rn(layer_2) layer_3_rn = self.scratch.layer3_rn(layer_3) layer_4_rn = self.scratch.layer4_rn(layer_4) path_4 = self.scratch.refinenet4(layer_4_rn) path_3 = self.scratch.refinenet3(path_4, layer_3_rn) path_2 = self.scratch.refinenet2(path_3, layer_2_rn) path_1 = self.scratch.refinenet1(path_2, layer_1_rn) out = self.scratch.output_conv(path_1) return torch.squeeze(out, dim=1) def fuse_model(m): prev_previous_type = nn.Identity() prev_previous_name = '' previous_type = nn.Identity() previous_name = '' for (name, module) in m.named_modules(): if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and (type(module) == nn.ReLU): torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True) elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d: torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True) prev_previous_type = previous_type prev_previous_name = previous_name previous_type = type(module) previous_name = name # File: controlnet_aux-master/src/controlnet_aux/midas/midas/transforms.py import numpy as np import cv2 import math def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA): shape = list(sample['disparity'].shape) if shape[0] >= size[0] and shape[1] >= size[1]: return sample scale = [0, 0] scale[0] = size[0] / shape[0] scale[1] = size[1] / shape[1] scale = max(scale) shape[0] = math.ceil(scale * shape[0]) shape[1] = math.ceil(scale * shape[1]) sample['image'] = cv2.resize(sample['image'], tuple(shape[::-1]), interpolation=image_interpolation_method) sample['disparity'] = cv2.resize(sample['disparity'], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST) sample['mask'] = cv2.resize(sample['mask'].astype(np.float32), tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST) sample['mask'] = sample['mask'].astype(bool) return tuple(shape) class Resize(object): def __init__(self, width, height, resize_target=True, keep_aspect_ratio=False, ensure_multiple_of=1, resize_method='lower_bound', image_interpolation_method=cv2.INTER_AREA): self.__width = width self.__height = height self.__resize_target = resize_target self.__keep_aspect_ratio = keep_aspect_ratio self.__multiple_of = ensure_multiple_of self.__resize_method = resize_method self.__image_interpolation_method = image_interpolation_method def constrain_to_multiple_of(self, x, min_val=0, max_val=None): y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int) if max_val is not None and y > max_val: y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int) if y < min_val: y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int) return y def get_size(self, width, height): scale_height = self.__height / height scale_width = self.__width / width if self.__keep_aspect_ratio: if self.__resize_method == 'lower_bound': if scale_width > scale_height: scale_height = scale_width else: scale_width = scale_height elif self.__resize_method == 'upper_bound': if scale_width < scale_height: scale_height = scale_width else: scale_width = scale_height elif self.__resize_method == 'minimal': if abs(1 - scale_width) < abs(1 - scale_height): scale_height = scale_width else: scale_width = scale_height else: raise ValueError(f'resize_method {self.__resize_method} not implemented') if self.__resize_method == 'lower_bound': new_height = self.constrain_to_multiple_of(scale_height * height, min_val=self.__height) new_width = self.constrain_to_multiple_of(scale_width * width, min_val=self.__width) elif self.__resize_method == 'upper_bound': new_height = self.constrain_to_multiple_of(scale_height * height, max_val=self.__height) new_width = self.constrain_to_multiple_of(scale_width * width, max_val=self.__width) elif self.__resize_method == 'minimal': new_height = self.constrain_to_multiple_of(scale_height * height) new_width = self.constrain_to_multiple_of(scale_width * width) else: raise ValueError(f'resize_method {self.__resize_method} not implemented') return (new_width, new_height) def __call__(self, sample): (width, height) = self.get_size(sample['image'].shape[1], sample['image'].shape[0]) sample['image'] = cv2.resize(sample['image'], (width, height), interpolation=self.__image_interpolation_method) if self.__resize_target: if 'disparity' in sample: sample['disparity'] = cv2.resize(sample['disparity'], (width, height), interpolation=cv2.INTER_NEAREST) if 'depth' in sample: sample['depth'] = cv2.resize(sample['depth'], (width, height), interpolation=cv2.INTER_NEAREST) sample['mask'] = cv2.resize(sample['mask'].astype(np.float32), (width, height), interpolation=cv2.INTER_NEAREST) sample['mask'] = sample['mask'].astype(bool) return sample class NormalizeImage(object): def __init__(self, mean, std): self.__mean = mean self.__std = std def __call__(self, sample): sample['image'] = (sample['image'] - self.__mean) / self.__std return sample class PrepareForNet(object): def __init__(self): pass def __call__(self, sample): image = np.transpose(sample['image'], (2, 0, 1)) sample['image'] = np.ascontiguousarray(image).astype(np.float32) if 'mask' in sample: sample['mask'] = sample['mask'].astype(np.float32) sample['mask'] = np.ascontiguousarray(sample['mask']) if 'disparity' in sample: disparity = sample['disparity'].astype(np.float32) sample['disparity'] = np.ascontiguousarray(disparity) if 'depth' in sample: depth = sample['depth'].astype(np.float32) sample['depth'] = np.ascontiguousarray(depth) return sample # File: controlnet_aux-master/src/controlnet_aux/midas/midas/vit.py import torch import torch.nn as nn import timm import types import math import torch.nn.functional as F class Slice(nn.Module): def __init__(self, start_index=1): super(Slice, self).__init__() self.start_index = start_index def forward(self, x): return x[:, self.start_index:] class AddReadout(nn.Module): def __init__(self, start_index=1): super(AddReadout, self).__init__() self.start_index = start_index def forward(self, x): if self.start_index == 2: readout = (x[:, 0] + x[:, 1]) / 2 else: readout = x[:, 0] return x[:, self.start_index:] + readout.unsqueeze(1) class ProjectReadout(nn.Module): def __init__(self, in_features, start_index=1): super(ProjectReadout, self).__init__() self.start_index = start_index self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU()) def forward(self, x): readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index:]) features = torch.cat((x[:, self.start_index:], readout), -1) return self.project(features) class Transpose(nn.Module): def __init__(self, dim0, dim1): super(Transpose, self).__init__() self.dim0 = dim0 self.dim1 = dim1 def forward(self, x): x = x.transpose(self.dim0, self.dim1) return x def forward_vit(pretrained, x): (b, c, h, w) = x.shape glob = pretrained.model.forward_flex(x) layer_1 = pretrained.activations['1'] layer_2 = pretrained.activations['2'] layer_3 = pretrained.activations['3'] layer_4 = pretrained.activations['4'] layer_1 = pretrained.act_postprocess1[0:2](layer_1) layer_2 = pretrained.act_postprocess2[0:2](layer_2) layer_3 = pretrained.act_postprocess3[0:2](layer_3) layer_4 = pretrained.act_postprocess4[0:2](layer_4) unflatten = nn.Sequential(nn.Unflatten(2, torch.Size([h // pretrained.model.patch_size[1], w // pretrained.model.patch_size[0]]))) if layer_1.ndim == 3: layer_1 = unflatten(layer_1) if layer_2.ndim == 3: layer_2 = unflatten(layer_2) if layer_3.ndim == 3: layer_3 = unflatten(layer_3) if layer_4.ndim == 3: layer_4 = unflatten(layer_4) layer_1 = pretrained.act_postprocess1[3:len(pretrained.act_postprocess1)](layer_1) layer_2 = pretrained.act_postprocess2[3:len(pretrained.act_postprocess2)](layer_2) layer_3 = pretrained.act_postprocess3[3:len(pretrained.act_postprocess3)](layer_3) layer_4 = pretrained.act_postprocess4[3:len(pretrained.act_postprocess4)](layer_4) return (layer_1, layer_2, layer_3, layer_4) def _resize_pos_embed(self, posemb, gs_h, gs_w): (posemb_tok, posemb_grid) = (posemb[:, :self.start_index], posemb[0, self.start_index:]) gs_old = int(math.sqrt(len(posemb_grid))) posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode='bilinear') posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1) posemb = torch.cat([posemb_tok, posemb_grid], dim=1) return posemb def forward_flex(self, x): (b, c, h, w) = x.shape pos_embed = self._resize_pos_embed(self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]) B = x.shape[0] if hasattr(self.patch_embed, 'backbone'): x = self.patch_embed.backbone(x) if isinstance(x, (list, tuple)): x = x[-1] x = self.patch_embed.proj(x).flatten(2).transpose(1, 2) if getattr(self, 'dist_token', None) is not None: cls_tokens = self.cls_token.expand(B, -1, -1) dist_token = self.dist_token.expand(B, -1, -1) x = torch.cat((cls_tokens, dist_token, x), dim=1) else: cls_tokens = self.cls_token.expand(B, -1, -1) x = torch.cat((cls_tokens, x), dim=1) x = x + pos_embed x = self.pos_drop(x) for blk in self.blocks: x = blk(x) x = self.norm(x) return x activations = {} def get_activation(name): def hook(model, input, output): activations[name] = output return hook def get_readout_oper(vit_features, features, use_readout, start_index=1): if use_readout == 'ignore': readout_oper = [Slice(start_index)] * len(features) elif use_readout == 'add': readout_oper = [AddReadout(start_index)] * len(features) elif use_readout == 'project': readout_oper = [ProjectReadout(vit_features, start_index) for out_feat in features] else: assert False, "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'" return readout_oper def _make_vit_b16_backbone(model, features=[96, 192, 384, 768], size=[384, 384], hooks=[2, 5, 8, 11], vit_features=768, use_readout='ignore', start_index=1): pretrained = nn.Module() pretrained.model = model pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation('1')) pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation('2')) pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation('3')) pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation('4')) pretrained.activations = activations readout_oper = get_readout_oper(vit_features, features, use_readout, start_index) pretrained.act_postprocess1 = nn.Sequential(readout_oper[0], Transpose(1, 2), nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), nn.Conv2d(in_channels=vit_features, out_channels=features[0], kernel_size=1, stride=1, padding=0), nn.ConvTranspose2d(in_channels=features[0], out_channels=features[0], kernel_size=4, stride=4, padding=0, bias=True, dilation=1, groups=1)) pretrained.act_postprocess2 = nn.Sequential(readout_oper[1], Transpose(1, 2), nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), nn.Conv2d(in_channels=vit_features, out_channels=features[1], kernel_size=1, stride=1, padding=0), nn.ConvTranspose2d(in_channels=features[1], out_channels=features[1], kernel_size=2, stride=2, padding=0, bias=True, dilation=1, groups=1)) pretrained.act_postprocess3 = nn.Sequential(readout_oper[2], Transpose(1, 2), nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), nn.Conv2d(in_channels=vit_features, out_channels=features[2], kernel_size=1, stride=1, padding=0)) pretrained.act_postprocess4 = nn.Sequential(readout_oper[3], Transpose(1, 2), nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), nn.Conv2d(in_channels=vit_features, out_channels=features[3], kernel_size=1, stride=1, padding=0), nn.Conv2d(in_channels=features[3], out_channels=features[3], kernel_size=3, stride=2, padding=1)) pretrained.model.start_index = start_index pretrained.model.patch_size = [16, 16] pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model) pretrained.model._resize_pos_embed = types.MethodType(_resize_pos_embed, pretrained.model) return pretrained def _make_pretrained_vitl16_384(pretrained, use_readout='ignore', hooks=None): model = timm.create_model('vit_large_patch16_384', pretrained=pretrained) hooks = [5, 11, 17, 23] if hooks == None else hooks return _make_vit_b16_backbone(model, features=[256, 512, 1024, 1024], hooks=hooks, vit_features=1024, use_readout=use_readout) def _make_pretrained_vitb16_384(pretrained, use_readout='ignore', hooks=None): model = timm.create_model('vit_base_patch16_384', pretrained=pretrained) hooks = [2, 5, 8, 11] if hooks == None else hooks return _make_vit_b16_backbone(model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout) def _make_pretrained_deitb16_384(pretrained, use_readout='ignore', hooks=None): model = timm.create_model('vit_deit_base_patch16_384', pretrained=pretrained) hooks = [2, 5, 8, 11] if hooks == None else hooks return _make_vit_b16_backbone(model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout) def _make_pretrained_deitb16_distil_384(pretrained, use_readout='ignore', hooks=None): model = timm.create_model('vit_deit_base_distilled_patch16_384', pretrained=pretrained) hooks = [2, 5, 8, 11] if hooks == None else hooks return _make_vit_b16_backbone(model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout, start_index=2) def _make_vit_b_rn50_backbone(model, features=[256, 512, 768, 768], size=[384, 384], hooks=[0, 1, 8, 11], vit_features=768, use_vit_only=False, use_readout='ignore', start_index=1): pretrained = nn.Module() pretrained.model = model if use_vit_only == True: pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation('1')) pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation('2')) else: pretrained.model.patch_embed.backbone.stages[0].register_forward_hook(get_activation('1')) pretrained.model.patch_embed.backbone.stages[1].register_forward_hook(get_activation('2')) pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation('3')) pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation('4')) pretrained.activations = activations readout_oper = get_readout_oper(vit_features, features, use_readout, start_index) if use_vit_only == True: pretrained.act_postprocess1 = nn.Sequential(readout_oper[0], Transpose(1, 2), nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), nn.Conv2d(in_channels=vit_features, out_channels=features[0], kernel_size=1, stride=1, padding=0), nn.ConvTranspose2d(in_channels=features[0], out_channels=features[0], kernel_size=4, stride=4, padding=0, bias=True, dilation=1, groups=1)) pretrained.act_postprocess2 = nn.Sequential(readout_oper[1], Transpose(1, 2), nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), nn.Conv2d(in_channels=vit_features, out_channels=features[1], kernel_size=1, stride=1, padding=0), nn.ConvTranspose2d(in_channels=features[1], out_channels=features[1], kernel_size=2, stride=2, padding=0, bias=True, dilation=1, groups=1)) else: pretrained.act_postprocess1 = nn.Sequential(nn.Identity(), nn.Identity(), nn.Identity()) pretrained.act_postprocess2 = nn.Sequential(nn.Identity(), nn.Identity(), nn.Identity()) pretrained.act_postprocess3 = nn.Sequential(readout_oper[2], Transpose(1, 2), nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), nn.Conv2d(in_channels=vit_features, out_channels=features[2], kernel_size=1, stride=1, padding=0)) pretrained.act_postprocess4 = nn.Sequential(readout_oper[3], Transpose(1, 2), nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), nn.Conv2d(in_channels=vit_features, out_channels=features[3], kernel_size=1, stride=1, padding=0), nn.Conv2d(in_channels=features[3], out_channels=features[3], kernel_size=3, stride=2, padding=1)) pretrained.model.start_index = start_index pretrained.model.patch_size = [16, 16] pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model) pretrained.model._resize_pos_embed = types.MethodType(_resize_pos_embed, pretrained.model) return pretrained def _make_pretrained_vitb_rn50_384(pretrained, use_readout='ignore', hooks=None, use_vit_only=False): model = timm.create_model('vit_base_resnet50_384', pretrained=pretrained) hooks = [0, 1, 8, 11] if hooks == None else hooks return _make_vit_b_rn50_backbone(model, features=[256, 512, 768, 768], size=[384, 384], hooks=hooks, use_vit_only=use_vit_only, use_readout=use_readout) # File: controlnet_aux-master/src/controlnet_aux/midas/utils.py """""" import sys import re import numpy as np import cv2 import torch def read_pfm(path): with open(path, 'rb') as file: color = None width = None height = None scale = None endian = None header = file.readline().rstrip() if header.decode('ascii') == 'PF': color = True elif header.decode('ascii') == 'Pf': color = False else: raise Exception('Not a PFM file: ' + path) dim_match = re.match('^(\\d+)\\s(\\d+)\\s$', file.readline().decode('ascii')) if dim_match: (width, height) = list(map(int, dim_match.groups())) else: raise Exception('Malformed PFM header.') scale = float(file.readline().decode('ascii').rstrip()) if scale < 0: endian = '<' scale = -scale else: endian = '>' data = np.fromfile(file, endian + 'f') shape = (height, width, 3) if color else (height, width) data = np.reshape(data, shape) data = np.flipud(data) return (data, scale) def write_pfm(path, image, scale=1): with open(path, 'wb') as file: color = None if image.dtype.name != 'float32': raise Exception('Image dtype must be float32.') image = np.flipud(image) if len(image.shape) == 3 and image.shape[2] == 3: color = True elif len(image.shape) == 2 or (len(image.shape) == 3 and image.shape[2] == 1): color = False else: raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.') file.write('PF\n' if color else 'Pf\n'.encode()) file.write('%d %d\n'.encode() % (image.shape[1], image.shape[0])) endian = image.dtype.byteorder if endian == '<' or (endian == '=' and sys.byteorder == 'little'): scale = -scale file.write('%f\n'.encode() % scale) image.tofile(file) def read_image(path): img = cv2.imread(path) if img.ndim == 2: img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0 return img def resize_image(img): height_orig = img.shape[0] width_orig = img.shape[1] if width_orig > height_orig: scale = width_orig / 384 else: scale = height_orig / 384 height = (np.ceil(height_orig / scale / 32) * 32).astype(int) width = (np.ceil(width_orig / scale / 32) * 32).astype(int) img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA) img_resized = torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float() img_resized = img_resized.unsqueeze(0) return img_resized def resize_depth(depth, width, height): depth = torch.squeeze(depth[0, :, :, :]).to('cpu') depth_resized = cv2.resize(depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC) return depth_resized def write_depth(path, depth, bits=1): write_pfm(path + '.pfm', depth.astype(np.float32)) depth_min = depth.min() depth_max = depth.max() max_val = 2 ** (8 * bits) - 1 if depth_max - depth_min > np.finfo('float').eps: out = max_val * (depth - depth_min) / (depth_max - depth_min) else: out = np.zeros(depth.shape, dtype=depth.type) if bits == 1: cv2.imwrite(path + '.png', out.astype('uint8')) elif bits == 2: cv2.imwrite(path + '.png', out.astype('uint16')) return # File: controlnet_aux-master/src/controlnet_aux/mlsd/__init__.py import os import warnings import cv2 import numpy as np import torch from huggingface_hub import hf_hub_download from PIL import Image from ..util import HWC3, resize_image from .models.mbv2_mlsd_large import MobileV2_MLSD_Large from .utils import pred_lines class MLSDdetector: def __init__(self, model): self.model = model @classmethod def from_pretrained(cls, pretrained_model_or_path, filename=None, cache_dir=None, local_files_only=False): if pretrained_model_or_path == 'lllyasviel/ControlNet': filename = filename or 'annotator/ckpts/mlsd_large_512_fp32.pth' else: filename = filename or 'mlsd_large_512_fp32.pth' if os.path.isdir(pretrained_model_or_path): model_path = os.path.join(pretrained_model_or_path, filename) else: model_path = hf_hub_download(pretrained_model_or_path, filename, cache_dir=cache_dir, local_files_only=local_files_only) model = MobileV2_MLSD_Large() model.load_state_dict(torch.load(model_path), strict=True) model.eval() return cls(model) def to(self, device): self.model.to(device) return self def __call__(self, input_image, thr_v=0.1, thr_d=0.1, detect_resolution=512, image_resolution=512, output_type='pil', **kwargs): if 'return_pil' in kwargs: warnings.warn('return_pil is deprecated. Use output_type instead.', DeprecationWarning) output_type = 'pil' if kwargs['return_pil'] else 'np' if type(output_type) is bool: warnings.warn('Passing `True` or `False` to `output_type` is deprecated and will raise an error in future versions') if output_type: output_type = 'pil' if not isinstance(input_image, np.ndarray): input_image = np.array(input_image, dtype=np.uint8) input_image = HWC3(input_image) input_image = resize_image(input_image, detect_resolution) assert input_image.ndim == 3 img = input_image img_output = np.zeros_like(img) try: with torch.no_grad(): lines = pred_lines(img, self.model, [img.shape[0], img.shape[1]], thr_v, thr_d) for line in lines: (x_start, y_start, x_end, y_end) = [int(val) for val in line] cv2.line(img_output, (x_start, y_start), (x_end, y_end), [255, 255, 255], 1) except Exception as e: pass detected_map = img_output[:, :, 0] detected_map = HWC3(detected_map) img = resize_image(input_image, image_resolution) (H, W, C) = img.shape detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR) if output_type == 'pil': detected_map = Image.fromarray(detected_map) return detected_map # File: controlnet_aux-master/src/controlnet_aux/mlsd/models/mbv2_mlsd_large.py import os import sys import torch import torch.nn as nn import torch.utils.model_zoo as model_zoo from torch.nn import functional as F class BlockTypeA(nn.Module): def __init__(self, in_c1, in_c2, out_c1, out_c2, upscale=True): super(BlockTypeA, self).__init__() self.conv1 = nn.Sequential(nn.Conv2d(in_c2, out_c2, kernel_size=1), nn.BatchNorm2d(out_c2), nn.ReLU(inplace=True)) self.conv2 = nn.Sequential(nn.Conv2d(in_c1, out_c1, kernel_size=1), nn.BatchNorm2d(out_c1), nn.ReLU(inplace=True)) self.upscale = upscale def forward(self, a, b): b = self.conv1(b) a = self.conv2(a) if self.upscale: b = F.interpolate(b, scale_factor=2.0, mode='bilinear', align_corners=True) return torch.cat((a, b), dim=1) class BlockTypeB(nn.Module): def __init__(self, in_c, out_c): super(BlockTypeB, self).__init__() self.conv1 = nn.Sequential(nn.Conv2d(in_c, in_c, kernel_size=3, padding=1), nn.BatchNorm2d(in_c), nn.ReLU()) self.conv2 = nn.Sequential(nn.Conv2d(in_c, out_c, kernel_size=3, padding=1), nn.BatchNorm2d(out_c), nn.ReLU()) def forward(self, x): x = self.conv1(x) + x x = self.conv2(x) return x class BlockTypeC(nn.Module): def __init__(self, in_c, out_c): super(BlockTypeC, self).__init__() self.conv1 = nn.Sequential(nn.Conv2d(in_c, in_c, kernel_size=3, padding=5, dilation=5), nn.BatchNorm2d(in_c), nn.ReLU()) self.conv2 = nn.Sequential(nn.Conv2d(in_c, in_c, kernel_size=3, padding=1), nn.BatchNorm2d(in_c), nn.ReLU()) self.conv3 = nn.Conv2d(in_c, out_c, kernel_size=1) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x def _make_divisible(v, divisor, min_value=None): if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) if new_v < 0.9 * v: new_v += divisor return new_v class ConvBNReLU(nn.Sequential): def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1): self.channel_pad = out_planes - in_planes self.stride = stride if stride == 2: padding = 0 else: padding = (kernel_size - 1) // 2 super(ConvBNReLU, self).__init__(nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False), nn.BatchNorm2d(out_planes), nn.ReLU6(inplace=True)) self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride) def forward(self, x): if self.stride == 2: x = F.pad(x, (0, 1, 0, 1), 'constant', 0) for module in self: if not isinstance(module, nn.MaxPool2d): x = module(x) return x class InvertedResidual(nn.Module): def __init__(self, inp, oup, stride, expand_ratio): super(InvertedResidual, self).__init__() self.stride = stride assert stride in [1, 2] hidden_dim = int(round(inp * expand_ratio)) self.use_res_connect = self.stride == 1 and inp == oup layers = [] if expand_ratio != 1: layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1)) layers.extend([ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim), nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup)]) self.conv = nn.Sequential(*layers) def forward(self, x): if self.use_res_connect: return x + self.conv(x) else: return self.conv(x) class MobileNetV2(nn.Module): def __init__(self, pretrained=True): super(MobileNetV2, self).__init__() block = InvertedResidual input_channel = 32 last_channel = 1280 width_mult = 1.0 round_nearest = 8 inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1]] if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4: raise ValueError('inverted_residual_setting should be non-empty or a 4-element list, got {}'.format(inverted_residual_setting)) input_channel = _make_divisible(input_channel * width_mult, round_nearest) self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest) features = [ConvBNReLU(4, input_channel, stride=2)] for (t, c, n, s) in inverted_residual_setting: output_channel = _make_divisible(c * width_mult, round_nearest) for i in range(n): stride = s if i == 0 else 1 features.append(block(input_channel, output_channel, stride, expand_ratio=t)) input_channel = output_channel self.features = nn.Sequential(*features) self.fpn_selected = [1, 3, 6, 10, 13] for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.BatchNorm2d): nn.init.ones_(m.weight) nn.init.zeros_(m.bias) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.zeros_(m.bias) if pretrained: self._load_pretrained_model() def _forward_impl(self, x): fpn_features = [] for (i, f) in enumerate(self.features): if i > self.fpn_selected[-1]: break x = f(x) if i in self.fpn_selected: fpn_features.append(x) (c1, c2, c3, c4, c5) = fpn_features return (c1, c2, c3, c4, c5) def forward(self, x): return self._forward_impl(x) def _load_pretrained_model(self): pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth') model_dict = {} state_dict = self.state_dict() for (k, v) in pretrain_dict.items(): if k in state_dict: model_dict[k] = v state_dict.update(model_dict) self.load_state_dict(state_dict) class MobileV2_MLSD_Large(nn.Module): def __init__(self): super(MobileV2_MLSD_Large, self).__init__() self.backbone = MobileNetV2(pretrained=False) self.block15 = BlockTypeA(in_c1=64, in_c2=96, out_c1=64, out_c2=64, upscale=False) self.block16 = BlockTypeB(128, 64) self.block17 = BlockTypeA(in_c1=32, in_c2=64, out_c1=64, out_c2=64) self.block18 = BlockTypeB(128, 64) self.block19 = BlockTypeA(in_c1=24, in_c2=64, out_c1=64, out_c2=64) self.block20 = BlockTypeB(128, 64) self.block21 = BlockTypeA(in_c1=16, in_c2=64, out_c1=64, out_c2=64) self.block22 = BlockTypeB(128, 64) self.block23 = BlockTypeC(64, 16) def forward(self, x): (c1, c2, c3, c4, c5) = self.backbone(x) x = self.block15(c4, c5) x = self.block16(x) x = self.block17(c3, x) x = self.block18(x) x = self.block19(c2, x) x = self.block20(x) x = self.block21(c1, x) x = self.block22(x) x = self.block23(x) x = x[:, 7:, :, :] return x # File: controlnet_aux-master/src/controlnet_aux/mlsd/models/mbv2_mlsd_tiny.py import os import sys import torch import torch.nn as nn import torch.utils.model_zoo as model_zoo from torch.nn import functional as F class BlockTypeA(nn.Module): def __init__(self, in_c1, in_c2, out_c1, out_c2, upscale=True): super(BlockTypeA, self).__init__() self.conv1 = nn.Sequential(nn.Conv2d(in_c2, out_c2, kernel_size=1), nn.BatchNorm2d(out_c2), nn.ReLU(inplace=True)) self.conv2 = nn.Sequential(nn.Conv2d(in_c1, out_c1, kernel_size=1), nn.BatchNorm2d(out_c1), nn.ReLU(inplace=True)) self.upscale = upscale def forward(self, a, b): b = self.conv1(b) a = self.conv2(a) b = F.interpolate(b, scale_factor=2.0, mode='bilinear', align_corners=True) return torch.cat((a, b), dim=1) class BlockTypeB(nn.Module): def __init__(self, in_c, out_c): super(BlockTypeB, self).__init__() self.conv1 = nn.Sequential(nn.Conv2d(in_c, in_c, kernel_size=3, padding=1), nn.BatchNorm2d(in_c), nn.ReLU()) self.conv2 = nn.Sequential(nn.Conv2d(in_c, out_c, kernel_size=3, padding=1), nn.BatchNorm2d(out_c), nn.ReLU()) def forward(self, x): x = self.conv1(x) + x x = self.conv2(x) return x class BlockTypeC(nn.Module): def __init__(self, in_c, out_c): super(BlockTypeC, self).__init__() self.conv1 = nn.Sequential(nn.Conv2d(in_c, in_c, kernel_size=3, padding=5, dilation=5), nn.BatchNorm2d(in_c), nn.ReLU()) self.conv2 = nn.Sequential(nn.Conv2d(in_c, in_c, kernel_size=3, padding=1), nn.BatchNorm2d(in_c), nn.ReLU()) self.conv3 = nn.Conv2d(in_c, out_c, kernel_size=1) def forward(self, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) return x def _make_divisible(v, divisor, min_value=None): if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) if new_v < 0.9 * v: new_v += divisor return new_v class ConvBNReLU(nn.Sequential): def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1): self.channel_pad = out_planes - in_planes self.stride = stride if stride == 2: padding = 0 else: padding = (kernel_size - 1) // 2 super(ConvBNReLU, self).__init__(nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False), nn.BatchNorm2d(out_planes), nn.ReLU6(inplace=True)) self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride) def forward(self, x): if self.stride == 2: x = F.pad(x, (0, 1, 0, 1), 'constant', 0) for module in self: if not isinstance(module, nn.MaxPool2d): x = module(x) return x class InvertedResidual(nn.Module): def __init__(self, inp, oup, stride, expand_ratio): super(InvertedResidual, self).__init__() self.stride = stride assert stride in [1, 2] hidden_dim = int(round(inp * expand_ratio)) self.use_res_connect = self.stride == 1 and inp == oup layers = [] if expand_ratio != 1: layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1)) layers.extend([ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim), nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup)]) self.conv = nn.Sequential(*layers) def forward(self, x): if self.use_res_connect: return x + self.conv(x) else: return self.conv(x) class MobileNetV2(nn.Module): def __init__(self, pretrained=True): super(MobileNetV2, self).__init__() block = InvertedResidual input_channel = 32 last_channel = 1280 width_mult = 1.0 round_nearest = 8 inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2]] if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4: raise ValueError('inverted_residual_setting should be non-empty or a 4-element list, got {}'.format(inverted_residual_setting)) input_channel = _make_divisible(input_channel * width_mult, round_nearest) self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest) features = [ConvBNReLU(4, input_channel, stride=2)] for (t, c, n, s) in inverted_residual_setting: output_channel = _make_divisible(c * width_mult, round_nearest) for i in range(n): stride = s if i == 0 else 1 features.append(block(input_channel, output_channel, stride, expand_ratio=t)) input_channel = output_channel self.features = nn.Sequential(*features) self.fpn_selected = [3, 6, 10] for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.BatchNorm2d): nn.init.ones_(m.weight) nn.init.zeros_(m.bias) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.zeros_(m.bias) def _forward_impl(self, x): fpn_features = [] for (i, f) in enumerate(self.features): if i > self.fpn_selected[-1]: break x = f(x) if i in self.fpn_selected: fpn_features.append(x) (c2, c3, c4) = fpn_features return (c2, c3, c4) def forward(self, x): return self._forward_impl(x) def _load_pretrained_model(self): pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth') model_dict = {} state_dict = self.state_dict() for (k, v) in pretrain_dict.items(): if k in state_dict: model_dict[k] = v state_dict.update(model_dict) self.load_state_dict(state_dict) class MobileV2_MLSD_Tiny(nn.Module): def __init__(self): super(MobileV2_MLSD_Tiny, self).__init__() self.backbone = MobileNetV2(pretrained=True) self.block12 = BlockTypeA(in_c1=32, in_c2=64, out_c1=64, out_c2=64) self.block13 = BlockTypeB(128, 64) self.block14 = BlockTypeA(in_c1=24, in_c2=64, out_c1=32, out_c2=32) self.block15 = BlockTypeB(64, 64) self.block16 = BlockTypeC(64, 16) def forward(self, x): (c2, c3, c4) = self.backbone(x) x = self.block12(c3, c4) x = self.block13(x) x = self.block14(c2, x) x = self.block15(x) x = self.block16(x) x = x[:, 7:, :, :] x = F.interpolate(x, scale_factor=2.0, mode='bilinear', align_corners=True) return x # File: controlnet_aux-master/src/controlnet_aux/mlsd/utils.py """""" '' import os import numpy as np import cv2 import torch from torch.nn import functional as F def deccode_output_score_and_ptss(tpMap, topk_n=200, ksize=5): (b, c, h, w) = tpMap.shape assert b == 1, 'only support bsize==1' displacement = tpMap[:, 1:5, :, :][0] center = tpMap[:, 0, :, :] heat = torch.sigmoid(center) hmax = F.max_pool2d(heat, (ksize, ksize), stride=1, padding=(ksize - 1) // 2) keep = (hmax == heat).float() heat = heat * keep heat = heat.reshape(-1) (scores, indices) = torch.topk(heat, topk_n, dim=-1, largest=True) yy = torch.floor_divide(indices, w).unsqueeze(-1) xx = torch.fmod(indices, w).unsqueeze(-1) ptss = torch.cat((yy, xx), dim=-1) ptss = ptss.detach().cpu().numpy() scores = scores.detach().cpu().numpy() displacement = displacement.detach().cpu().numpy() displacement = displacement.transpose((1, 2, 0)) return (ptss, scores, displacement) def pred_lines(image, model, input_shape=[512, 512], score_thr=0.1, dist_thr=20.0): (h, w, _) = image.shape device = next(iter(model.parameters())).device (h_ratio, w_ratio) = [h / input_shape[0], w / input_shape[1]] resized_image = np.concatenate([cv2.resize(image, (input_shape[1], input_shape[0]), interpolation=cv2.INTER_AREA), np.ones([input_shape[0], input_shape[1], 1])], axis=-1) resized_image = resized_image.transpose((2, 0, 1)) batch_image = np.expand_dims(resized_image, axis=0).astype('float32') batch_image = batch_image / 127.5 - 1.0 batch_image = torch.from_numpy(batch_image).float() batch_image = batch_image.to(device) outputs = model(batch_image) (pts, pts_score, vmap) = deccode_output_score_and_ptss(outputs, 200, 3) start = vmap[:, :, :2] end = vmap[:, :, 2:] dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1)) segments_list = [] for (center, score) in zip(pts, pts_score): (y, x) = center distance = dist_map[y, x] if score > score_thr and distance > dist_thr: (disp_x_start, disp_y_start, disp_x_end, disp_y_end) = vmap[y, x, :] x_start = x + disp_x_start y_start = y + disp_y_start x_end = x + disp_x_end y_end = y + disp_y_end segments_list.append([x_start, y_start, x_end, y_end]) lines = 2 * np.array(segments_list) lines[:, 0] = lines[:, 0] * w_ratio lines[:, 1] = lines[:, 1] * h_ratio lines[:, 2] = lines[:, 2] * w_ratio lines[:, 3] = lines[:, 3] * h_ratio return lines def pred_squares(image, model, input_shape=[512, 512], params={'score': 0.06, 'outside_ratio': 0.28, 'inside_ratio': 0.45, 'w_overlap': 0.0, 'w_degree': 1.95, 'w_length': 0.0, 'w_area': 1.86, 'w_center': 0.14}): (h, w, _) = image.shape original_shape = [h, w] device = next(iter(model.parameters())).device resized_image = np.concatenate([cv2.resize(image, (input_shape[0], input_shape[1]), interpolation=cv2.INTER_AREA), np.ones([input_shape[0], input_shape[1], 1])], axis=-1) resized_image = resized_image.transpose((2, 0, 1)) batch_image = np.expand_dims(resized_image, axis=0).astype('float32') batch_image = batch_image / 127.5 - 1.0 batch_image = torch.from_numpy(batch_image).float().to(device) outputs = model(batch_image) (pts, pts_score, vmap) = deccode_output_score_and_ptss(outputs, 200, 3) start = vmap[:, :, :2] end = vmap[:, :, 2:] dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1)) junc_list = [] segments_list = [] for (junc, score) in zip(pts, pts_score): (y, x) = junc distance = dist_map[y, x] if score > params['score'] and distance > 20.0: junc_list.append([x, y]) (disp_x_start, disp_y_start, disp_x_end, disp_y_end) = vmap[y, x, :] d_arrow = 1.0 x_start = x + d_arrow * disp_x_start y_start = y + d_arrow * disp_y_start x_end = x + d_arrow * disp_x_end y_end = y + d_arrow * disp_y_end segments_list.append([x_start, y_start, x_end, y_end]) segments = np.array(segments_list) point = np.array([[0, 0]]) point = point[0] start = segments[:, :2] end = segments[:, 2:] diff = start - end a = diff[:, 1] b = -diff[:, 0] c = a * start[:, 0] + b * start[:, 1] d = np.abs(a * point[0] + b * point[1] - c) / np.sqrt(a ** 2 + b ** 2 + 1e-10) theta = np.arctan2(diff[:, 0], diff[:, 1]) * 180 / np.pi theta[theta < 0.0] += 180 hough = np.concatenate([d[:, None], theta[:, None]], axis=-1) d_quant = 1 theta_quant = 2 hough[:, 0] //= d_quant hough[:, 1] //= theta_quant (_, indices, counts) = np.unique(hough, axis=0, return_index=True, return_counts=True) acc_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='float32') idx_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='int32') - 1 yx_indices = hough[indices, :].astype('int32') acc_map[yx_indices[:, 0], yx_indices[:, 1]] = counts idx_map[yx_indices[:, 0], yx_indices[:, 1]] = indices acc_map_np = acc_map acc_map = torch.from_numpy(acc_map_np).unsqueeze(0).unsqueeze(0) (_, _, h, w) = acc_map.shape max_acc_map = F.max_pool2d(acc_map, kernel_size=5, stride=1, padding=2) acc_map = acc_map * (acc_map == max_acc_map).float() flatten_acc_map = acc_map.reshape([-1]) (scores, indices) = torch.topk(flatten_acc_map, len(pts), dim=-1, largest=True) yy = torch.div(indices, w, rounding_mode='floor').unsqueeze(-1) xx = torch.fmod(indices, w).unsqueeze(-1) yx = torch.cat((yy, xx), dim=-1) yx = yx.detach().cpu().numpy() topk_values = scores.detach().cpu().numpy() indices = idx_map[yx[:, 0], yx[:, 1]] basis = 5 // 2 merged_segments = [] for (yx_pt, max_indice, value) in zip(yx, indices, topk_values): (y, x) = yx_pt if max_indice == -1 or value == 0: continue segment_list = [] for y_offset in range(-basis, basis + 1): for x_offset in range(-basis, basis + 1): indice = idx_map[y + y_offset, x + x_offset] cnt = int(acc_map_np[y + y_offset, x + x_offset]) if indice != -1: segment_list.append(segments[indice]) if cnt > 1: check_cnt = 1 current_hough = hough[indice] for (new_indice, new_hough) in enumerate(hough): if (current_hough == new_hough).all() and indice != new_indice: segment_list.append(segments[new_indice]) check_cnt += 1 if check_cnt == cnt: break group_segments = np.array(segment_list).reshape([-1, 2]) sorted_group_segments = np.sort(group_segments, axis=0) (x_min, y_min) = sorted_group_segments[0, :] (x_max, y_max) = sorted_group_segments[-1, :] deg = theta[max_indice] if deg >= 90: merged_segments.append([x_min, y_max, x_max, y_min]) else: merged_segments.append([x_min, y_min, x_max, y_max]) new_segments = np.array(merged_segments) start = new_segments[:, :2] end = new_segments[:, 2:] new_centers = (start + end) / 2.0 diff = start - end dist_segments = np.sqrt(np.sum(diff ** 2, axis=-1)) a = diff[:, 1] b = -diff[:, 0] c = a * start[:, 0] + b * start[:, 1] pre_det = a[:, None] * b[None, :] det = pre_det - np.transpose(pre_det) pre_inter_y = a[:, None] * c[None, :] inter_y = (pre_inter_y - np.transpose(pre_inter_y)) / (det + 1e-10) pre_inter_x = c[:, None] * b[None, :] inter_x = (pre_inter_x - np.transpose(pre_inter_x)) / (det + 1e-10) inter_pts = np.concatenate([inter_x[:, :, None], inter_y[:, :, None]], axis=-1).astype('int32') '' dist_inter_to_segment1_start = np.sqrt(np.sum((inter_pts - start[:, None, :]) ** 2, axis=-1, keepdims=True)) dist_inter_to_segment1_end = np.sqrt(np.sum((inter_pts - end[:, None, :]) ** 2, axis=-1, keepdims=True)) dist_inter_to_segment2_start = np.sqrt(np.sum((inter_pts - start[None, :, :]) ** 2, axis=-1, keepdims=True)) dist_inter_to_segment2_end = np.sqrt(np.sum((inter_pts - end[None, :, :]) ** 2, axis=-1, keepdims=True)) dist_inter_to_segment1 = np.sort(np.concatenate([dist_inter_to_segment1_start, dist_inter_to_segment1_end], axis=-1), axis=-1) dist_inter_to_segment2 = np.sort(np.concatenate([dist_inter_to_segment2_start, dist_inter_to_segment2_end], axis=-1), axis=-1) inter_to_start = new_centers[:, None, :] - inter_pts deg_inter_to_start = np.arctan2(inter_to_start[:, :, 1], inter_to_start[:, :, 0]) * 180 / np.pi deg_inter_to_start[deg_inter_to_start < 0.0] += 360 inter_to_end = new_centers[None, :, :] - inter_pts deg_inter_to_end = np.arctan2(inter_to_end[:, :, 1], inter_to_end[:, :, 0]) * 180 / np.pi deg_inter_to_end[deg_inter_to_end < 0.0] += 360 '' (deg1_map, deg2_map) = (deg_inter_to_start, deg_inter_to_end) deg_sort = np.sort(np.concatenate([deg1_map[:, :, None], deg2_map[:, :, None]], axis=-1), axis=-1) deg_diff_map = np.abs(deg1_map - deg2_map) deg_diff_map[deg_diff_map > 180] = 360 - deg_diff_map[deg_diff_map > 180] deg_range = [60, 120] corner_dict = {corner_info: [] for corner_info in range(4)} inter_points = [] for i in range(inter_pts.shape[0]): for j in range(i + 1, inter_pts.shape[1]): (x, y) = inter_pts[i, j, :] (deg1, deg2) = deg_sort[i, j, :] deg_diff = deg_diff_map[i, j] check_degree = deg_diff > deg_range[0] and deg_diff < deg_range[1] outside_ratio = params['outside_ratio'] inside_ratio = params['inside_ratio'] check_distance = (dist_inter_to_segment1[i, j, 1] >= dist_segments[i] and dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * outside_ratio or (dist_inter_to_segment1[i, j, 1] <= dist_segments[i] and dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * inside_ratio)) and (dist_inter_to_segment2[i, j, 1] >= dist_segments[j] and dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * outside_ratio or (dist_inter_to_segment2[i, j, 1] <= dist_segments[j] and dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * inside_ratio)) if check_degree and check_distance: corner_info = None if deg1 >= 0 and deg1 <= 45 and (deg2 >= 45) and (deg2 <= 120) or (deg2 >= 315 and deg1 >= 45 and (deg1 <= 120)): (corner_info, color_info) = (0, 'blue') elif deg1 >= 45 and deg1 <= 125 and (deg2 >= 125) and (deg2 <= 225): (corner_info, color_info) = (1, 'green') elif deg1 >= 125 and deg1 <= 225 and (deg2 >= 225) and (deg2 <= 315): (corner_info, color_info) = (2, 'black') elif deg1 >= 0 and deg1 <= 45 and (deg2 >= 225) and (deg2 <= 315) or (deg2 >= 315 and deg1 >= 225 and (deg1 <= 315)): (corner_info, color_info) = (3, 'cyan') else: (corner_info, color_info) = (4, 'red') continue corner_dict[corner_info].append([x, y, i, j]) inter_points.append([x, y]) square_list = [] connect_list = [] segments_list = [] for corner0 in corner_dict[0]: for corner1 in corner_dict[1]: connect01 = False for corner0_line in corner0[2:]: if corner0_line in corner1[2:]: connect01 = True break if connect01: for corner2 in corner_dict[2]: connect12 = False for corner1_line in corner1[2:]: if corner1_line in corner2[2:]: connect12 = True break if connect12: for corner3 in corner_dict[3]: connect23 = False for corner2_line in corner2[2:]: if corner2_line in corner3[2:]: connect23 = True break if connect23: for corner3_line in corner3[2:]: if corner3_line in corner0[2:]: '' square_list.append(corner0[:2] + corner1[:2] + corner2[:2] + corner3[:2]) connect_list.append([corner0_line, corner1_line, corner2_line, corner3_line]) segments_list.append(corner0[2:] + corner1[2:] + corner2[2:] + corner3[2:]) def check_outside_inside(segments_info, connect_idx): if connect_idx == segments_info[0]: check_dist_mat = dist_inter_to_segment1 else: check_dist_mat = dist_inter_to_segment2 (i, j) = segments_info (min_dist, max_dist) = check_dist_mat[i, j, :] connect_dist = dist_segments[connect_idx] if max_dist > connect_dist: return ('outside', min_dist, 0, 1) else: return ('inside', min_dist, -1, -1) top_square = None try: map_size = input_shape[0] / 2 squares = np.array(square_list).reshape([-1, 4, 2]) score_array = [] connect_array = np.array(connect_list) segments_array = np.array(segments_list).reshape([-1, 4, 2]) squares_rollup = np.roll(squares, 1, axis=1) squares_rolldown = np.roll(squares, -1, axis=1) vec1 = squares_rollup - squares normalized_vec1 = vec1 / (np.linalg.norm(vec1, axis=-1, keepdims=True) + 1e-10) vec2 = squares_rolldown - squares normalized_vec2 = vec2 / (np.linalg.norm(vec2, axis=-1, keepdims=True) + 1e-10) inner_products = np.sum(normalized_vec1 * normalized_vec2, axis=-1) squares_degree = np.arccos(inner_products) * 180 / np.pi overlap_scores = [] degree_scores = [] length_scores = [] for (connects, segments, square, degree) in zip(connect_array, segments_array, squares, squares_degree): '' cover = 0 perimeter = 0 square_length = [] for start_idx in range(4): end_idx = (start_idx + 1) % 4 connect_idx = connects[start_idx] start_segments = segments[start_idx] end_segments = segments[end_idx] start_point = square[start_idx] end_point = square[end_idx] (start_position, start_min, start_cover_param, start_peri_param) = check_outside_inside(start_segments, connect_idx) (end_position, end_min, end_cover_param, end_peri_param) = check_outside_inside(end_segments, connect_idx) cover += dist_segments[connect_idx] + start_cover_param * start_min + end_cover_param * end_min perimeter += dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min square_length.append(dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min) overlap_scores.append(cover / perimeter) '' (deg0, deg1, deg2, deg3) = degree deg_ratio1 = deg0 / deg2 if deg_ratio1 > 1.0: deg_ratio1 = 1 / deg_ratio1 deg_ratio2 = deg1 / deg3 if deg_ratio2 > 1.0: deg_ratio2 = 1 / deg_ratio2 degree_scores.append((deg_ratio1 + deg_ratio2) / 2) '' (len0, len1, len2, len3) = square_length len_ratio1 = len0 / len2 if len2 > len0 else len2 / len0 len_ratio2 = len1 / len3 if len3 > len1 else len3 / len1 length_scores.append((len_ratio1 + len_ratio2) / 2) overlap_scores = np.array(overlap_scores) overlap_scores /= np.max(overlap_scores) degree_scores = np.array(degree_scores) length_scores = np.array(length_scores) area_scores = np.reshape(squares, [-1, 4, 2]) area_x = area_scores[:, :, 0] area_y = area_scores[:, :, 1] correction = area_x[:, -1] * area_y[:, 0] - area_y[:, -1] * area_x[:, 0] area_scores = np.sum(area_x[:, :-1] * area_y[:, 1:], axis=-1) - np.sum(area_y[:, :-1] * area_x[:, 1:], axis=-1) area_scores = 0.5 * np.abs(area_scores + correction) area_scores /= map_size * map_size centers = np.array([[256 // 2, 256 // 2]], dtype='float32') square_centers = np.mean(squares, axis=1) center2center = np.sqrt(np.sum((centers - square_centers) ** 2)) center_scores = center2center / (map_size / np.sqrt(2.0)) '' score_w = [0.0, 1.0, 10.0, 0.5, 1.0] score_array = params['w_overlap'] * overlap_scores + params['w_degree'] * degree_scores + params['w_area'] * area_scores - params['w_center'] * center_scores + params['w_length'] * length_scores best_square = [] sorted_idx = np.argsort(score_array)[::-1] score_array = score_array[sorted_idx] squares = squares[sorted_idx] except Exception as e: pass '' try: new_segments[:, 0] = new_segments[:, 0] * 2 / input_shape[1] * original_shape[1] new_segments[:, 1] = new_segments[:, 1] * 2 / input_shape[0] * original_shape[0] new_segments[:, 2] = new_segments[:, 2] * 2 / input_shape[1] * original_shape[1] new_segments[:, 3] = new_segments[:, 3] * 2 / input_shape[0] * original_shape[0] except: new_segments = [] try: squares[:, :, 0] = squares[:, :, 0] * 2 / input_shape[1] * original_shape[1] squares[:, :, 1] = squares[:, :, 1] * 2 / input_shape[0] * original_shape[0] except: squares = [] score_array = [] try: inter_points = np.array(inter_points) inter_points[:, 0] = inter_points[:, 0] * 2 / input_shape[1] * original_shape[1] inter_points[:, 1] = inter_points[:, 1] * 2 / input_shape[0] * original_shape[0] except: inter_points = [] return (new_segments, squares, score_array, inter_points) # File: controlnet_aux-master/src/controlnet_aux/normalbae/__init__.py import os import types import warnings import cv2 import numpy as np import torch import torchvision.transforms as transforms from einops import rearrange from huggingface_hub import hf_hub_download from PIL import Image from ..util import HWC3, resize_image from .nets.NNET import NNET def load_checkpoint(fpath, model): ckpt = torch.load(fpath, map_location='cpu')['model'] load_dict = {} for (k, v) in ckpt.items(): if k.startswith('module.'): k_ = k.replace('module.', '') load_dict[k_] = v else: load_dict[k] = v model.load_state_dict(load_dict) return model class NormalBaeDetector: def __init__(self, model): self.model = model self.norm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) @classmethod def from_pretrained(cls, pretrained_model_or_path, filename=None, cache_dir=None, local_files_only=False): filename = filename or 'scannet.pt' if os.path.isdir(pretrained_model_or_path): model_path = os.path.join(pretrained_model_or_path, filename) else: model_path = hf_hub_download(pretrained_model_or_path, filename, cache_dir=cache_dir, local_files_only=local_files_only) args = types.SimpleNamespace() args.mode = 'client' args.architecture = 'BN' args.pretrained = 'scannet' args.sampling_ratio = 0.4 args.importance_ratio = 0.7 model = NNET(args) model = load_checkpoint(model_path, model) model.eval() return cls(model) def to(self, device): self.model.to(device) return self def __call__(self, input_image, detect_resolution=512, image_resolution=512, output_type='pil', **kwargs): if 'return_pil' in kwargs: warnings.warn('return_pil is deprecated. Use output_type instead.', DeprecationWarning) output_type = 'pil' if kwargs['return_pil'] else 'np' if type(output_type) is bool: warnings.warn('Passing `True` or `False` to `output_type` is deprecated and will raise an error in future versions') if output_type: output_type = 'pil' device = next(iter(self.model.parameters())).device if not isinstance(input_image, np.ndarray): input_image = np.array(input_image, dtype=np.uint8) input_image = HWC3(input_image) input_image = resize_image(input_image, detect_resolution) assert input_image.ndim == 3 image_normal = input_image with torch.no_grad(): image_normal = torch.from_numpy(image_normal).float().to(device) image_normal = image_normal / 255.0 image_normal = rearrange(image_normal, 'h w c -> 1 c h w') image_normal = self.norm(image_normal) normal = self.model(image_normal) normal = normal[0][-1][:, :3] normal = ((normal + 1) * 0.5).clip(0, 1) normal = rearrange(normal[0], 'c h w -> h w c').cpu().numpy() normal_image = (normal * 255.0).clip(0, 255).astype(np.uint8) detected_map = normal_image detected_map = HWC3(detected_map) img = resize_image(input_image, image_resolution) (H, W, C) = img.shape detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR) if output_type == 'pil': detected_map = Image.fromarray(detected_map) return detected_map # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/NNET.py import torch import torch.nn as nn import torch.nn.functional as F from .submodules.encoder import Encoder from .submodules.decoder import Decoder class NNET(nn.Module): def __init__(self, args): super(NNET, self).__init__() self.encoder = Encoder() self.decoder = Decoder(args) def get_1x_lr_params(self): return self.encoder.parameters() def get_10x_lr_params(self): return self.decoder.parameters() def forward(self, img, **kwargs): return self.decoder(self.encoder(img), **kwargs) # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/baseline.py import torch import torch.nn as nn import torch.nn.functional as F from .submodules.submodules import UpSampleBN, norm_normalize class NNET(nn.Module): def __init__(self, args=None): super(NNET, self).__init__() self.encoder = Encoder() self.decoder = Decoder(num_classes=4) def forward(self, x, **kwargs): out = self.decoder(self.encoder(x), **kwargs) up_out = F.interpolate(out, size=[x.size(2), x.size(3)], mode='bilinear', align_corners=False) up_out = norm_normalize(up_out) return up_out def get_1x_lr_params(self): return self.encoder.parameters() def get_10x_lr_params(self): modules = [self.decoder] for m in modules: yield from m.parameters() class Encoder(nn.Module): def __init__(self): super(Encoder, self).__init__() basemodel_name = 'tf_efficientnet_b5_ap' basemodel = torch.hub.load('rwightman/gen-efficientnet-pytorch', basemodel_name, pretrained=True) basemodel.global_pool = nn.Identity() basemodel.classifier = nn.Identity() self.original_model = basemodel def forward(self, x): features = [x] for (k, v) in self.original_model._modules.items(): if k == 'blocks': for (ki, vi) in v._modules.items(): features.append(vi(features[-1])) else: features.append(v(features[-1])) return features class Decoder(nn.Module): def __init__(self, num_classes=4): super(Decoder, self).__init__() self.conv2 = nn.Conv2d(2048, 2048, kernel_size=1, stride=1, padding=0) self.up1 = UpSampleBN(skip_input=2048 + 176, output_features=1024) self.up2 = UpSampleBN(skip_input=1024 + 64, output_features=512) self.up3 = UpSampleBN(skip_input=512 + 40, output_features=256) self.up4 = UpSampleBN(skip_input=256 + 24, output_features=128) self.conv3 = nn.Conv2d(128, num_classes, kernel_size=3, stride=1, padding=1) def forward(self, features): (x_block0, x_block1, x_block2, x_block3, x_block4) = (features[4], features[5], features[6], features[8], features[11]) x_d0 = self.conv2(x_block4) x_d1 = self.up1(x_d0, x_block3) x_d2 = self.up2(x_d1, x_block2) x_d3 = self.up3(x_d2, x_block1) x_d4 = self.up4(x_d3, x_block0) out = self.conv3(x_d4) return out if __name__ == '__main__': model = Baseline() x = torch.rand(2, 3, 480, 640) out = model(x) print(out.shape) # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/decoder.py import torch import torch.nn as nn import torch.nn.functional as F from .submodules import UpSampleBN, UpSampleGN, norm_normalize, sample_points class Decoder(nn.Module): def __init__(self, args): super(Decoder, self).__init__() self.sampling_ratio = args.sampling_ratio self.importance_ratio = args.importance_ratio self.conv2 = nn.Conv2d(2048, 2048, kernel_size=1, stride=1, padding=0) if args.architecture == 'BN': self.up1 = UpSampleBN(skip_input=2048 + 176, output_features=1024) self.up2 = UpSampleBN(skip_input=1024 + 64, output_features=512) self.up3 = UpSampleBN(skip_input=512 + 40, output_features=256) self.up4 = UpSampleBN(skip_input=256 + 24, output_features=128) elif args.architecture == 'GN': self.up1 = UpSampleGN(skip_input=2048 + 176, output_features=1024) self.up2 = UpSampleGN(skip_input=1024 + 64, output_features=512) self.up3 = UpSampleGN(skip_input=512 + 40, output_features=256) self.up4 = UpSampleGN(skip_input=256 + 24, output_features=128) else: raise Exception('invalid architecture') self.out_conv_res8 = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1) self.out_conv_res4 = nn.Sequential(nn.Conv1d(512 + 4, 128, kernel_size=1), nn.ReLU(), nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(), nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(), nn.Conv1d(128, 4, kernel_size=1)) self.out_conv_res2 = nn.Sequential(nn.Conv1d(256 + 4, 128, kernel_size=1), nn.ReLU(), nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(), nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(), nn.Conv1d(128, 4, kernel_size=1)) self.out_conv_res1 = nn.Sequential(nn.Conv1d(128 + 4, 128, kernel_size=1), nn.ReLU(), nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(), nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(), nn.Conv1d(128, 4, kernel_size=1)) def forward(self, features, gt_norm_mask=None, mode='test'): (x_block0, x_block1, x_block2, x_block3, x_block4) = (features[4], features[5], features[6], features[8], features[11]) x_d0 = self.conv2(x_block4) x_d1 = self.up1(x_d0, x_block3) x_d2 = self.up2(x_d1, x_block2) x_d3 = self.up3(x_d2, x_block1) x_d4 = self.up4(x_d3, x_block0) out_res8 = self.out_conv_res8(x_d2) out_res8 = norm_normalize(out_res8) if mode == 'train': out_res8_res4 = F.interpolate(out_res8, scale_factor=2, mode='bilinear', align_corners=True) (B, _, H, W) = out_res8_res4.shape (point_coords_res4, rows_int, cols_int) = sample_points(out_res8_res4.detach(), gt_norm_mask, sampling_ratio=self.sampling_ratio, beta=self.importance_ratio) out_res4 = out_res8_res4 feat_res4 = F.grid_sample(x_d2, point_coords_res4, mode='bilinear', align_corners=True) init_pred = F.grid_sample(out_res8, point_coords_res4, mode='bilinear', align_corners=True) feat_res4 = torch.cat([feat_res4, init_pred], dim=1) samples_pred_res4 = self.out_conv_res4(feat_res4[:, :, 0, :]) samples_pred_res4 = norm_normalize(samples_pred_res4) for i in range(B): out_res4[i, :, rows_int[i, :], cols_int[i, :]] = samples_pred_res4[i, :, :] else: feat_map = F.interpolate(x_d2, scale_factor=2, mode='bilinear', align_corners=True) init_pred = F.interpolate(out_res8, scale_factor=2, mode='bilinear', align_corners=True) feat_map = torch.cat([feat_map, init_pred], dim=1) (B, _, H, W) = feat_map.shape out_res4 = self.out_conv_res4(feat_map.view(B, 512 + 4, -1)) out_res4 = norm_normalize(out_res4) out_res4 = out_res4.view(B, 4, H, W) samples_pred_res4 = point_coords_res4 = None if mode == 'train': out_res4_res2 = F.interpolate(out_res4, scale_factor=2, mode='bilinear', align_corners=True) (B, _, H, W) = out_res4_res2.shape (point_coords_res2, rows_int, cols_int) = sample_points(out_res4_res2.detach(), gt_norm_mask, sampling_ratio=self.sampling_ratio, beta=self.importance_ratio) out_res2 = out_res4_res2 feat_res2 = F.grid_sample(x_d3, point_coords_res2, mode='bilinear', align_corners=True) init_pred = F.grid_sample(out_res4, point_coords_res2, mode='bilinear', align_corners=True) feat_res2 = torch.cat([feat_res2, init_pred], dim=1) samples_pred_res2 = self.out_conv_res2(feat_res2[:, :, 0, :]) samples_pred_res2 = norm_normalize(samples_pred_res2) for i in range(B): out_res2[i, :, rows_int[i, :], cols_int[i, :]] = samples_pred_res2[i, :, :] else: feat_map = F.interpolate(x_d3, scale_factor=2, mode='bilinear', align_corners=True) init_pred = F.interpolate(out_res4, scale_factor=2, mode='bilinear', align_corners=True) feat_map = torch.cat([feat_map, init_pred], dim=1) (B, _, H, W) = feat_map.shape out_res2 = self.out_conv_res2(feat_map.view(B, 256 + 4, -1)) out_res2 = norm_normalize(out_res2) out_res2 = out_res2.view(B, 4, H, W) samples_pred_res2 = point_coords_res2 = None if mode == 'train': out_res2_res1 = F.interpolate(out_res2, scale_factor=2, mode='bilinear', align_corners=True) (B, _, H, W) = out_res2_res1.shape (point_coords_res1, rows_int, cols_int) = sample_points(out_res2_res1.detach(), gt_norm_mask, sampling_ratio=self.sampling_ratio, beta=self.importance_ratio) out_res1 = out_res2_res1 feat_res1 = F.grid_sample(x_d4, point_coords_res1, mode='bilinear', align_corners=True) init_pred = F.grid_sample(out_res2, point_coords_res1, mode='bilinear', align_corners=True) feat_res1 = torch.cat([feat_res1, init_pred], dim=1) samples_pred_res1 = self.out_conv_res1(feat_res1[:, :, 0, :]) samples_pred_res1 = norm_normalize(samples_pred_res1) for i in range(B): out_res1[i, :, rows_int[i, :], cols_int[i, :]] = samples_pred_res1[i, :, :] else: feat_map = F.interpolate(x_d4, scale_factor=2, mode='bilinear', align_corners=True) init_pred = F.interpolate(out_res2, scale_factor=2, mode='bilinear', align_corners=True) feat_map = torch.cat([feat_map, init_pred], dim=1) (B, _, H, W) = feat_map.shape out_res1 = self.out_conv_res1(feat_map.view(B, 128 + 4, -1)) out_res1 = norm_normalize(out_res1) out_res1 = out_res1.view(B, 4, H, W) samples_pred_res1 = point_coords_res1 = None return ([out_res8, out_res4, out_res2, out_res1], [out_res8, samples_pred_res4, samples_pred_res2, samples_pred_res1], [None, point_coords_res4, point_coords_res2, point_coords_res1]) # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/caffe2_benchmark.py """""" import argparse from caffe2.python import core, workspace, model_helper from caffe2.proto import caffe2_pb2 parser = argparse.ArgumentParser(description='Caffe2 Model Benchmark') parser.add_argument('--c2-prefix', default='', type=str, metavar='NAME', help='caffe2 model pb name prefix') parser.add_argument('--c2-init', default='', type=str, metavar='PATH', help='caffe2 model init .pb') parser.add_argument('--c2-predict', default='', type=str, metavar='PATH', help='caffe2 model predict .pb') parser.add_argument('-b', '--batch-size', default=1, type=int, metavar='N', help='mini-batch size (default: 1)') parser.add_argument('--img-size', default=224, type=int, metavar='N', help='Input image dimension, uses model default if empty') def main(): args = parser.parse_args() args.gpu_id = 0 if args.c2_prefix: args.c2_init = args.c2_prefix + '.init.pb' args.c2_predict = args.c2_prefix + '.predict.pb' model = model_helper.ModelHelper(name='le_net', init_params=False) init_net_proto = caffe2_pb2.NetDef() with open(args.c2_init, 'rb') as f: init_net_proto.ParseFromString(f.read()) model.param_init_net = core.Net(init_net_proto) predict_net_proto = caffe2_pb2.NetDef() with open(args.c2_predict, 'rb') as f: predict_net_proto.ParseFromString(f.read()) model.net = core.Net(predict_net_proto) input_blob = model.net.external_inputs[0] model.param_init_net.GaussianFill([], input_blob.GetUnscopedName(), shape=(args.batch_size, 3, args.img_size, args.img_size), mean=0.0, std=1.0) workspace.RunNetOnce(model.param_init_net) workspace.CreateNet(model.net, overwrite=True) workspace.BenchmarkNet(model.net.Proto().name, 5, 20, True) if __name__ == '__main__': main() # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/caffe2_validate.py """""" import argparse import numpy as np from caffe2.python import core, workspace, model_helper from caffe2.proto import caffe2_pb2 from data import create_loader, resolve_data_config, Dataset from utils import AverageMeter import time parser = argparse.ArgumentParser(description='Caffe2 ImageNet Validation') parser.add_argument('data', metavar='DIR', help='path to dataset') parser.add_argument('--c2-prefix', default='', type=str, metavar='NAME', help='caffe2 model pb name prefix') parser.add_argument('--c2-init', default='', type=str, metavar='PATH', help='caffe2 model init .pb') parser.add_argument('--c2-predict', default='', type=str, metavar='PATH', help='caffe2 model predict .pb') parser.add_argument('-j', '--workers', default=2, type=int, metavar='N', help='number of data loading workers (default: 2)') parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)') parser.add_argument('--img-size', default=None, type=int, metavar='N', help='Input image dimension, uses model default if empty') parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', help='Override mean pixel value of dataset') parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', help='Override std deviation of of dataset') parser.add_argument('--crop-pct', type=float, default=None, metavar='PCT', help='Override default crop pct of 0.875') parser.add_argument('--interpolation', default='', type=str, metavar='NAME', help='Image resize interpolation type (overrides model)') parser.add_argument('--tf-preprocessing', dest='tf_preprocessing', action='store_true', help='use tensorflow mnasnet preporcessing') parser.add_argument('--print-freq', '-p', default=10, type=int, metavar='N', help='print frequency (default: 10)') def main(): args = parser.parse_args() args.gpu_id = 0 if args.c2_prefix: args.c2_init = args.c2_prefix + '.init.pb' args.c2_predict = args.c2_prefix + '.predict.pb' model = model_helper.ModelHelper(name='validation_net', init_params=False) init_net_proto = caffe2_pb2.NetDef() with open(args.c2_init, 'rb') as f: init_net_proto.ParseFromString(f.read()) model.param_init_net = core.Net(init_net_proto) predict_net_proto = caffe2_pb2.NetDef() with open(args.c2_predict, 'rb') as f: predict_net_proto.ParseFromString(f.read()) model.net = core.Net(predict_net_proto) data_config = resolve_data_config(None, args) loader = create_loader(Dataset(args.data, load_bytes=args.tf_preprocessing), input_size=data_config['input_size'], batch_size=args.batch_size, use_prefetcher=False, interpolation=data_config['interpolation'], mean=data_config['mean'], std=data_config['std'], num_workers=args.workers, crop_pct=data_config['crop_pct'], tensorflow_preprocessing=args.tf_preprocessing) input_blob = model.net.external_inputs[0] output_blob = model.net.external_outputs[0] if True: device_opts = None else: device_opts = core.DeviceOption(caffe2_pb2.PROTO_CUDA, args.gpu_id) model.net.RunAllOnGPU(gpu_id=args.gpu_id, use_cudnn=True) model.param_init_net.RunAllOnGPU(gpu_id=args.gpu_id, use_cudnn=True) model.param_init_net.GaussianFill([], input_blob.GetUnscopedName(), shape=(1,) + data_config['input_size'], mean=0.0, std=1.0) workspace.RunNetOnce(model.param_init_net) workspace.CreateNet(model.net, overwrite=True) batch_time = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() end = time.time() for (i, (input, target)) in enumerate(loader): caffe2_in = input.data.numpy() workspace.FeedBlob(input_blob, caffe2_in, device_opts) workspace.RunNet(model.net, num_iter=1) output = workspace.FetchBlob(output_blob) (prec1, prec5) = accuracy_np(output.data, target.numpy()) top1.update(prec1.item(), input.size(0)) top5.update(prec5.item(), input.size(0)) batch_time.update(time.time() - end) end = time.time() if i % args.print_freq == 0: print('Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f}, {rate_avg:.3f}/s, {ms_avg:.3f} ms/sample) \tPrec@1 {top1.val:.3f} ({top1.avg:.3f})\tPrec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(i, len(loader), batch_time=batch_time, rate_avg=input.size(0) / batch_time.avg, ms_avg=100 * batch_time.avg / input.size(0), top1=top1, top5=top5)) print(' * Prec@1 {top1.avg:.3f} ({top1a:.3f}) Prec@5 {top5.avg:.3f} ({top5a:.3f})'.format(top1=top1, top1a=100 - top1.avg, top5=top5, top5a=100.0 - top5.avg)) def accuracy_np(output, target): max_indices = np.argsort(output, axis=1)[:, ::-1] top5 = 100 * np.equal(max_indices[:, :5], target[:, np.newaxis]).sum(axis=1).mean() top1 = 100 * np.equal(max_indices[:, 0], target).mean() return (top1, top5) if __name__ == '__main__': main() # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/activations/__init__.py from geffnet import config from geffnet.activations.activations_me import * from geffnet.activations.activations_jit import * from geffnet.activations.activations import * import torch _has_silu = 'silu' in dir(torch.nn.functional) _ACT_FN_DEFAULT = dict(silu=F.silu if _has_silu else swish, swish=F.silu if _has_silu else swish, mish=mish, relu=F.relu, relu6=F.relu6, sigmoid=sigmoid, tanh=tanh, hard_sigmoid=hard_sigmoid, hard_swish=hard_swish) _ACT_FN_JIT = dict(silu=F.silu if _has_silu else swish_jit, swish=F.silu if _has_silu else swish_jit, mish=mish_jit) _ACT_FN_ME = dict(silu=F.silu if _has_silu else swish_me, swish=F.silu if _has_silu else swish_me, mish=mish_me, hard_swish=hard_swish_me, hard_sigmoid_jit=hard_sigmoid_me) _ACT_LAYER_DEFAULT = dict(silu=nn.SiLU if _has_silu else Swish, swish=nn.SiLU if _has_silu else Swish, mish=Mish, relu=nn.ReLU, relu6=nn.ReLU6, sigmoid=Sigmoid, tanh=Tanh, hard_sigmoid=HardSigmoid, hard_swish=HardSwish) _ACT_LAYER_JIT = dict(silu=nn.SiLU if _has_silu else SwishJit, swish=nn.SiLU if _has_silu else SwishJit, mish=MishJit) _ACT_LAYER_ME = dict(silu=nn.SiLU if _has_silu else SwishMe, swish=nn.SiLU if _has_silu else SwishMe, mish=MishMe, hard_swish=HardSwishMe, hard_sigmoid=HardSigmoidMe) _OVERRIDE_FN = dict() _OVERRIDE_LAYER = dict() def add_override_act_fn(name, fn): global _OVERRIDE_FN _OVERRIDE_FN[name] = fn def update_override_act_fn(overrides): assert isinstance(overrides, dict) global _OVERRIDE_FN _OVERRIDE_FN.update(overrides) def clear_override_act_fn(): global _OVERRIDE_FN _OVERRIDE_FN = dict() def add_override_act_layer(name, fn): _OVERRIDE_LAYER[name] = fn def update_override_act_layer(overrides): assert isinstance(overrides, dict) global _OVERRIDE_LAYER _OVERRIDE_LAYER.update(overrides) def clear_override_act_layer(): global _OVERRIDE_LAYER _OVERRIDE_LAYER = dict() def get_act_fn(name='relu'): if name in _OVERRIDE_FN: return _OVERRIDE_FN[name] use_me = not (config.is_exportable() or config.is_scriptable() or config.is_no_jit()) if use_me and name in _ACT_FN_ME: return _ACT_FN_ME[name] if config.is_exportable() and name in ('silu', 'swish'): return swish use_jit = not (config.is_exportable() or config.is_no_jit()) if use_jit and name in _ACT_FN_JIT: return _ACT_FN_JIT[name] return _ACT_FN_DEFAULT[name] def get_act_layer(name='relu'): if name in _OVERRIDE_LAYER: return _OVERRIDE_LAYER[name] use_me = not (config.is_exportable() or config.is_scriptable() or config.is_no_jit()) if use_me and name in _ACT_LAYER_ME: return _ACT_LAYER_ME[name] if config.is_exportable() and name in ('silu', 'swish'): return Swish use_jit = not (config.is_exportable() or config.is_no_jit()) if use_jit and name in _ACT_FN_JIT: return _ACT_LAYER_JIT[name] return _ACT_LAYER_DEFAULT[name] # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/activations/activations.py """""" from torch import nn as nn from torch.nn import functional as F def swish(x, inplace: bool=False): return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid()) class Swish(nn.Module): def __init__(self, inplace: bool=False): super(Swish, self).__init__() self.inplace = inplace def forward(self, x): return swish(x, self.inplace) def mish(x, inplace: bool=False): return x.mul(F.softplus(x).tanh()) class Mish(nn.Module): def __init__(self, inplace: bool=False): super(Mish, self).__init__() self.inplace = inplace def forward(self, x): return mish(x, self.inplace) def sigmoid(x, inplace: bool=False): return x.sigmoid_() if inplace else x.sigmoid() class Sigmoid(nn.Module): def __init__(self, inplace: bool=False): super(Sigmoid, self).__init__() self.inplace = inplace def forward(self, x): return x.sigmoid_() if self.inplace else x.sigmoid() def tanh(x, inplace: bool=False): return x.tanh_() if inplace else x.tanh() class Tanh(nn.Module): def __init__(self, inplace: bool=False): super(Tanh, self).__init__() self.inplace = inplace def forward(self, x): return x.tanh_() if self.inplace else x.tanh() def hard_swish(x, inplace: bool=False): inner = F.relu6(x + 3.0).div_(6.0) return x.mul_(inner) if inplace else x.mul(inner) class HardSwish(nn.Module): def __init__(self, inplace: bool=False): super(HardSwish, self).__init__() self.inplace = inplace def forward(self, x): return hard_swish(x, self.inplace) def hard_sigmoid(x, inplace: bool=False): if inplace: return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0) else: return F.relu6(x + 3.0) / 6.0 class HardSigmoid(nn.Module): def __init__(self, inplace: bool=False): super(HardSigmoid, self).__init__() self.inplace = inplace def forward(self, x): return hard_sigmoid(x, self.inplace) # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/activations/activations_jit.py """""" import torch from torch import nn as nn from torch.nn import functional as F __all__ = ['swish_jit', 'SwishJit', 'mish_jit', 'MishJit', 'hard_sigmoid_jit', 'HardSigmoidJit', 'hard_swish_jit', 'HardSwishJit'] @torch.jit.script def swish_jit(x, inplace: bool=False): return x.mul(x.sigmoid()) @torch.jit.script def mish_jit(x, _inplace: bool=False): return x.mul(F.softplus(x).tanh()) class SwishJit(nn.Module): def __init__(self, inplace: bool=False): super(SwishJit, self).__init__() def forward(self, x): return swish_jit(x) class MishJit(nn.Module): def __init__(self, inplace: bool=False): super(MishJit, self).__init__() def forward(self, x): return mish_jit(x) @torch.jit.script def hard_sigmoid_jit(x, inplace: bool=False): return (x + 3).clamp(min=0, max=6).div(6.0) class HardSigmoidJit(nn.Module): def __init__(self, inplace: bool=False): super(HardSigmoidJit, self).__init__() def forward(self, x): return hard_sigmoid_jit(x) @torch.jit.script def hard_swish_jit(x, inplace: bool=False): return x * (x + 3).clamp(min=0, max=6).div(6.0) class HardSwishJit(nn.Module): def __init__(self, inplace: bool=False): super(HardSwishJit, self).__init__() def forward(self, x): return hard_swish_jit(x) # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/activations/activations_me.py """""" import torch from torch import nn as nn from torch.nn import functional as F __all__ = ['swish_me', 'SwishMe', 'mish_me', 'MishMe', 'hard_sigmoid_me', 'HardSigmoidMe', 'hard_swish_me', 'HardSwishMe'] @torch.jit.script def swish_jit_fwd(x): return x.mul(torch.sigmoid(x)) @torch.jit.script def swish_jit_bwd(x, grad_output): x_sigmoid = torch.sigmoid(x) return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid))) class SwishJitAutoFn(torch.autograd.Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return swish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return swish_jit_bwd(x, grad_output) def swish_me(x, inplace=False): return SwishJitAutoFn.apply(x) class SwishMe(nn.Module): def __init__(self, inplace: bool=False): super(SwishMe, self).__init__() def forward(self, x): return SwishJitAutoFn.apply(x) @torch.jit.script def mish_jit_fwd(x): return x.mul(torch.tanh(F.softplus(x))) @torch.jit.script def mish_jit_bwd(x, grad_output): x_sigmoid = torch.sigmoid(x) x_tanh_sp = F.softplus(x).tanh() return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp)) class MishJitAutoFn(torch.autograd.Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return mish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return mish_jit_bwd(x, grad_output) def mish_me(x, inplace=False): return MishJitAutoFn.apply(x) class MishMe(nn.Module): def __init__(self, inplace: bool=False): super(MishMe, self).__init__() def forward(self, x): return MishJitAutoFn.apply(x) @torch.jit.script def hard_sigmoid_jit_fwd(x, inplace: bool=False): return (x + 3).clamp(min=0, max=6).div(6.0) @torch.jit.script def hard_sigmoid_jit_bwd(x, grad_output): m = torch.ones_like(x) * ((x >= -3.0) & (x <= 3.0)) / 6.0 return grad_output * m class HardSigmoidJitAutoFn(torch.autograd.Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return hard_sigmoid_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return hard_sigmoid_jit_bwd(x, grad_output) def hard_sigmoid_me(x, inplace: bool=False): return HardSigmoidJitAutoFn.apply(x) class HardSigmoidMe(nn.Module): def __init__(self, inplace: bool=False): super(HardSigmoidMe, self).__init__() def forward(self, x): return HardSigmoidJitAutoFn.apply(x) @torch.jit.script def hard_swish_jit_fwd(x): return x * (x + 3).clamp(min=0, max=6).div(6.0) @torch.jit.script def hard_swish_jit_bwd(x, grad_output): m = torch.ones_like(x) * (x >= 3.0) m = torch.where((x >= -3.0) & (x <= 3.0), x / 3.0 + 0.5, m) return grad_output * m class HardSwishJitAutoFn(torch.autograd.Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return hard_swish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return hard_swish_jit_bwd(x, grad_output) def hard_swish_me(x, inplace=False): return HardSwishJitAutoFn.apply(x) class HardSwishMe(nn.Module): def __init__(self, inplace: bool=False): super(HardSwishMe, self).__init__() def forward(self, x): return HardSwishJitAutoFn.apply(x) # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/config.py """""" from typing import Any, Optional __all__ = ['is_exportable', 'is_scriptable', 'is_no_jit', 'layer_config_kwargs', 'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config'] _NO_JIT = False _NO_ACTIVATION_JIT = False _EXPORTABLE = False _SCRIPTABLE = False def is_no_jit(): return _NO_JIT class set_no_jit: def __init__(self, mode: bool) -> None: global _NO_JIT self.prev = _NO_JIT _NO_JIT = mode def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _NO_JIT _NO_JIT = self.prev return False def is_exportable(): return _EXPORTABLE class set_exportable: def __init__(self, mode: bool) -> None: global _EXPORTABLE self.prev = _EXPORTABLE _EXPORTABLE = mode def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _EXPORTABLE _EXPORTABLE = self.prev return False def is_scriptable(): return _SCRIPTABLE class set_scriptable: def __init__(self, mode: bool) -> None: global _SCRIPTABLE self.prev = _SCRIPTABLE _SCRIPTABLE = mode def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _SCRIPTABLE _SCRIPTABLE = self.prev return False class set_layer_config: def __init__(self, scriptable: Optional[bool]=None, exportable: Optional[bool]=None, no_jit: Optional[bool]=None, no_activation_jit: Optional[bool]=None): global _SCRIPTABLE global _EXPORTABLE global _NO_JIT global _NO_ACTIVATION_JIT self.prev = (_SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT) if scriptable is not None: _SCRIPTABLE = scriptable if exportable is not None: _EXPORTABLE = exportable if no_jit is not None: _NO_JIT = no_jit if no_activation_jit is not None: _NO_ACTIVATION_JIT = no_activation_jit def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _SCRIPTABLE global _EXPORTABLE global _NO_JIT global _NO_ACTIVATION_JIT (_SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT) = self.prev return False def layer_config_kwargs(kwargs): return set_layer_config(scriptable=kwargs.pop('scriptable', None), exportable=kwargs.pop('exportable', None), no_jit=kwargs.pop('no_jit', None)) # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/conv2d_layers.py """""" import collections.abc import math from functools import partial from itertools import repeat from typing import Tuple, Optional import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from .config import * def _ntuple(n): def parse(x): if isinstance(x, collections.abc.Iterable): return x return tuple(repeat(x, n)) return parse _single = _ntuple(1) _pair = _ntuple(2) _triple = _ntuple(3) _quadruple = _ntuple(4) def _is_static_pad(kernel_size, stride=1, dilation=1, **_): return stride == 1 and dilation * (kernel_size - 1) % 2 == 0 def _get_padding(kernel_size, stride=1, dilation=1, **_): padding = (stride - 1 + dilation * (kernel_size - 1)) // 2 return padding def _calc_same_pad(i: int, k: int, s: int, d: int): return max((-(i // -s) - 1) * s + (k - 1) * d + 1 - i, 0) def _same_pad_arg(input_size, kernel_size, stride, dilation): (ih, iw) = input_size (kh, kw) = kernel_size pad_h = _calc_same_pad(ih, kh, stride[0], dilation[0]) pad_w = _calc_same_pad(iw, kw, stride[1], dilation[1]) return [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2] def _split_channels(num_chan, num_groups): split = [num_chan // num_groups for _ in range(num_groups)] split[0] += num_chan - sum(split) return split def conv2d_same(x, weight: torch.Tensor, bias: Optional[torch.Tensor]=None, stride: Tuple[int, int]=(1, 1), padding: Tuple[int, int]=(0, 0), dilation: Tuple[int, int]=(1, 1), groups: int=1): (ih, iw) = x.size()[-2:] (kh, kw) = weight.size()[-2:] pad_h = _calc_same_pad(ih, kh, stride[0], dilation[0]) pad_w = _calc_same_pad(iw, kw, stride[1], dilation[1]) x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2]) return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) class Conv2dSame(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super(Conv2dSame, self).__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) def forward(self, x): return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) class Conv2dSameExport(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super(Conv2dSameExport, self).__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) self.pad = None self.pad_input_size = (0, 0) def forward(self, x): input_size = x.size()[-2:] if self.pad is None: pad_arg = _same_pad_arg(input_size, self.weight.size()[-2:], self.stride, self.dilation) self.pad = nn.ZeroPad2d(pad_arg) self.pad_input_size = input_size if self.pad is not None: x = self.pad(x) return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) def get_padding_value(padding, kernel_size, **kwargs): dynamic = False if isinstance(padding, str): padding = padding.lower() if padding == 'same': if _is_static_pad(kernel_size, **kwargs): padding = _get_padding(kernel_size, **kwargs) else: padding = 0 dynamic = True elif padding == 'valid': padding = 0 else: padding = _get_padding(kernel_size, **kwargs) return (padding, dynamic) def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): padding = kwargs.pop('padding', '') kwargs.setdefault('bias', False) (padding, is_dynamic) = get_padding_value(padding, kernel_size, **kwargs) if is_dynamic: if is_exportable(): assert not is_scriptable() return Conv2dSameExport(in_chs, out_chs, kernel_size, **kwargs) else: return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs) else: return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) class MixedConv2d(nn.ModuleDict): def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding='', dilation=1, depthwise=False, **kwargs): super(MixedConv2d, self).__init__() kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] num_groups = len(kernel_size) in_splits = _split_channels(in_channels, num_groups) out_splits = _split_channels(out_channels, num_groups) self.in_channels = sum(in_splits) self.out_channels = sum(out_splits) for (idx, (k, in_ch, out_ch)) in enumerate(zip(kernel_size, in_splits, out_splits)): conv_groups = out_ch if depthwise else 1 self.add_module(str(idx), create_conv2d_pad(in_ch, out_ch, k, stride=stride, padding=padding, dilation=dilation, groups=conv_groups, **kwargs)) self.splits = in_splits def forward(self, x): x_split = torch.split(x, self.splits, 1) x_out = [conv(x_split[i]) for (i, conv) in enumerate(self.values())] x = torch.cat(x_out, 1) return x def get_condconv_initializer(initializer, num_experts, expert_shape): def condconv_initializer(weight): num_params = np.prod(expert_shape) if len(weight.shape) != 2 or weight.shape[0] != num_experts or weight.shape[1] != num_params: raise ValueError('CondConv variables must have shape [num_experts, num_params]') for i in range(num_experts): initializer(weight[i].view(expert_shape)) return condconv_initializer class CondConv2d(nn.Module): __constants__ = ['bias', 'in_channels', 'out_channels', 'dynamic_padding'] def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): super(CondConv2d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = _pair(kernel_size) self.stride = _pair(stride) (padding_val, is_padding_dynamic) = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) self.dynamic_padding = is_padding_dynamic self.padding = _pair(padding_val) self.dilation = _pair(dilation) self.groups = groups self.num_experts = num_experts self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size weight_num_param = 1 for wd in self.weight_shape: weight_num_param *= wd self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) if bias: self.bias_shape = (self.out_channels,) self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): init_weight = get_condconv_initializer(partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) init_weight(self.weight) if self.bias is not None: fan_in = np.prod(self.weight_shape[1:]) bound = 1 / math.sqrt(fan_in) init_bias = get_condconv_initializer(partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape) init_bias(self.bias) def forward(self, x, routing_weights): (B, C, H, W) = x.shape weight = torch.matmul(routing_weights, self.weight) new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size weight = weight.view(new_weight_shape) bias = None if self.bias is not None: bias = torch.matmul(routing_weights, self.bias) bias = bias.view(B * self.out_channels) x = x.view(1, B * C, H, W) if self.dynamic_padding: out = conv2d_same(x, weight, bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups * B) else: out = F.conv2d(x, weight, bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups * B) out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1]) return out def select_conv2d(in_chs, out_chs, kernel_size, **kwargs): assert 'groups' not in kwargs if isinstance(kernel_size, list): assert 'num_experts' not in kwargs m = MixedConv2d(in_chs, out_chs, kernel_size, **kwargs) else: depthwise = kwargs.pop('depthwise', False) groups = out_chs if depthwise else 1 if 'num_experts' in kwargs and kwargs['num_experts'] > 0: m = CondConv2d(in_chs, out_chs, kernel_size, groups=groups, **kwargs) else: m = create_conv2d_pad(in_chs, out_chs, kernel_size, groups=groups, **kwargs) return m # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/efficientnet_builder.py """""" import re from copy import deepcopy from .conv2d_layers import * from geffnet.activations import * __all__ = ['get_bn_args_tf', 'resolve_bn_args', 'resolve_se_args', 'resolve_act_layer', 'make_divisible', 'round_channels', 'drop_connect', 'SqueezeExcite', 'ConvBnAct', 'DepthwiseSeparableConv', 'InvertedResidual', 'CondConvResidual', 'EdgeResidual', 'EfficientNetBuilder', 'decode_arch_def', 'initialize_weight_default', 'initialize_weight_goog', 'BN_MOMENTUM_TF_DEFAULT', 'BN_EPS_TF_DEFAULT'] BN_MOMENTUM_TF_DEFAULT = 1 - 0.99 BN_EPS_TF_DEFAULT = 0.001 _BN_ARGS_TF = dict(momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT) def get_bn_args_tf(): return _BN_ARGS_TF.copy() def resolve_bn_args(kwargs): bn_args = get_bn_args_tf() if kwargs.pop('bn_tf', False) else {} bn_momentum = kwargs.pop('bn_momentum', None) if bn_momentum is not None: bn_args['momentum'] = bn_momentum bn_eps = kwargs.pop('bn_eps', None) if bn_eps is not None: bn_args['eps'] = bn_eps return bn_args _SE_ARGS_DEFAULT = dict(gate_fn=sigmoid, act_layer=None, reduce_mid=False, divisor=1) def resolve_se_args(kwargs, in_chs, act_layer=None): se_kwargs = kwargs.copy() if kwargs is not None else {} for (k, v) in _SE_ARGS_DEFAULT.items(): se_kwargs.setdefault(k, v) if not se_kwargs.pop('reduce_mid'): se_kwargs['reduced_base_chs'] = in_chs if se_kwargs['act_layer'] is None: assert act_layer is not None se_kwargs['act_layer'] = act_layer return se_kwargs def resolve_act_layer(kwargs, default='relu'): act_layer = kwargs.pop('act_layer', default) if isinstance(act_layer, str): act_layer = get_act_layer(act_layer) return act_layer def make_divisible(v: int, divisor: int=8, min_value: int=None): min_value = min_value or divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) if new_v < 0.9 * v: new_v += divisor return new_v def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None): if not multiplier: return channels channels *= multiplier return make_divisible(channels, divisor, channel_min) def drop_connect(inputs, training: bool=False, drop_connect_rate: float=0.0): if not training: return inputs keep_prob = 1 - drop_connect_rate random_tensor = keep_prob + torch.rand((inputs.size()[0], 1, 1, 1), dtype=inputs.dtype, device=inputs.device) random_tensor.floor_() output = inputs.div(keep_prob) * random_tensor return output class SqueezeExcite(nn.Module): def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None, act_layer=nn.ReLU, gate_fn=sigmoid, divisor=1): super(SqueezeExcite, self).__init__() reduced_chs = make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor) self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True) self.act1 = act_layer(inplace=True) self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True) self.gate_fn = gate_fn def forward(self, x): x_se = x.mean((2, 3), keepdim=True) x_se = self.conv_reduce(x_se) x_se = self.act1(x_se) x_se = self.conv_expand(x_se) x = x * self.gate_fn(x_se) return x class ConvBnAct(nn.Module): def __init__(self, in_chs, out_chs, kernel_size, stride=1, pad_type='', act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, norm_kwargs=None): super(ConvBnAct, self).__init__() assert stride in [1, 2] norm_kwargs = norm_kwargs or {} self.conv = select_conv2d(in_chs, out_chs, kernel_size, stride=stride, padding=pad_type) self.bn1 = norm_layer(out_chs, **norm_kwargs) self.act1 = act_layer(inplace=True) def forward(self, x): x = self.conv(x) x = self.bn1(x) x = self.act1(x) return x class DepthwiseSeparableConv(nn.Module): def __init__(self, in_chs, out_chs, dw_kernel_size=3, stride=1, pad_type='', act_layer=nn.ReLU, noskip=False, pw_kernel_size=1, pw_act=False, se_ratio=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_connect_rate=0.0): super(DepthwiseSeparableConv, self).__init__() assert stride in [1, 2] norm_kwargs = norm_kwargs or {} self.has_residual = (stride == 1 and in_chs == out_chs) and (not noskip) self.drop_connect_rate = drop_connect_rate self.conv_dw = select_conv2d(in_chs, in_chs, dw_kernel_size, stride=stride, padding=pad_type, depthwise=True) self.bn1 = norm_layer(in_chs, **norm_kwargs) self.act1 = act_layer(inplace=True) if se_ratio is not None and se_ratio > 0.0: se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) self.se = SqueezeExcite(in_chs, se_ratio=se_ratio, **se_kwargs) else: self.se = nn.Identity() self.conv_pw = select_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type) self.bn2 = norm_layer(out_chs, **norm_kwargs) self.act2 = act_layer(inplace=True) if pw_act else nn.Identity() def forward(self, x): residual = x x = self.conv_dw(x) x = self.bn1(x) x = self.act1(x) x = self.se(x) x = self.conv_pw(x) x = self.bn2(x) x = self.act2(x) if self.has_residual: if self.drop_connect_rate > 0.0: x = drop_connect(x, self.training, self.drop_connect_rate) x += residual return x class InvertedResidual(nn.Module): def __init__(self, in_chs, out_chs, dw_kernel_size=3, stride=1, pad_type='', act_layer=nn.ReLU, noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, se_ratio=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, conv_kwargs=None, drop_connect_rate=0.0): super(InvertedResidual, self).__init__() norm_kwargs = norm_kwargs or {} conv_kwargs = conv_kwargs or {} mid_chs: int = make_divisible(in_chs * exp_ratio) self.has_residual = (in_chs == out_chs and stride == 1) and (not noskip) self.drop_connect_rate = drop_connect_rate self.conv_pw = select_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs) self.bn1 = norm_layer(mid_chs, **norm_kwargs) self.act1 = act_layer(inplace=True) self.conv_dw = select_conv2d(mid_chs, mid_chs, dw_kernel_size, stride=stride, padding=pad_type, depthwise=True, **conv_kwargs) self.bn2 = norm_layer(mid_chs, **norm_kwargs) self.act2 = act_layer(inplace=True) if se_ratio is not None and se_ratio > 0.0: se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs) else: self.se = nn.Identity() self.conv_pwl = select_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs) self.bn3 = norm_layer(out_chs, **norm_kwargs) def forward(self, x): residual = x x = self.conv_pw(x) x = self.bn1(x) x = self.act1(x) x = self.conv_dw(x) x = self.bn2(x) x = self.act2(x) x = self.se(x) x = self.conv_pwl(x) x = self.bn3(x) if self.has_residual: if self.drop_connect_rate > 0.0: x = drop_connect(x, self.training, self.drop_connect_rate) x += residual return x class CondConvResidual(InvertedResidual): def __init__(self, in_chs, out_chs, dw_kernel_size=3, stride=1, pad_type='', act_layer=nn.ReLU, noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, se_ratio=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, num_experts=0, drop_connect_rate=0.0): self.num_experts = num_experts conv_kwargs = dict(num_experts=self.num_experts) super(CondConvResidual, self).__init__(in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, pad_type=pad_type, act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size, pw_kernel_size=pw_kernel_size, se_ratio=se_ratio, se_kwargs=se_kwargs, norm_layer=norm_layer, norm_kwargs=norm_kwargs, conv_kwargs=conv_kwargs, drop_connect_rate=drop_connect_rate) self.routing_fn = nn.Linear(in_chs, self.num_experts) def forward(self, x): residual = x pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1) routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs)) x = self.conv_pw(x, routing_weights) x = self.bn1(x) x = self.act1(x) x = self.conv_dw(x, routing_weights) x = self.bn2(x) x = self.act2(x) x = self.se(x) x = self.conv_pwl(x, routing_weights) x = self.bn3(x) if self.has_residual: if self.drop_connect_rate > 0.0: x = drop_connect(x, self.training, self.drop_connect_rate) x += residual return x class EdgeResidual(nn.Module): def __init__(self, in_chs, out_chs, exp_kernel_size=3, exp_ratio=1.0, fake_in_chs=0, stride=1, pad_type='', act_layer=nn.ReLU, noskip=False, pw_kernel_size=1, se_ratio=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_connect_rate=0.0): super(EdgeResidual, self).__init__() norm_kwargs = norm_kwargs or {} mid_chs = make_divisible(fake_in_chs * exp_ratio) if fake_in_chs > 0 else make_divisible(in_chs * exp_ratio) self.has_residual = (in_chs == out_chs and stride == 1) and (not noskip) self.drop_connect_rate = drop_connect_rate self.conv_exp = select_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type) self.bn1 = norm_layer(mid_chs, **norm_kwargs) self.act1 = act_layer(inplace=True) if se_ratio is not None and se_ratio > 0.0: se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer) self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs) else: self.se = nn.Identity() self.conv_pwl = select_conv2d(mid_chs, out_chs, pw_kernel_size, stride=stride, padding=pad_type) self.bn2 = nn.BatchNorm2d(out_chs, **norm_kwargs) def forward(self, x): residual = x x = self.conv_exp(x) x = self.bn1(x) x = self.act1(x) x = self.se(x) x = self.conv_pwl(x) x = self.bn2(x) if self.has_residual: if self.drop_connect_rate > 0.0: x = drop_connect(x, self.training, self.drop_connect_rate) x += residual return x class EfficientNetBuilder: def __init__(self, channel_multiplier=1.0, channel_divisor=8, channel_min=None, pad_type='', act_layer=None, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_connect_rate=0.0): self.channel_multiplier = channel_multiplier self.channel_divisor = channel_divisor self.channel_min = channel_min self.pad_type = pad_type self.act_layer = act_layer self.se_kwargs = se_kwargs self.norm_layer = norm_layer self.norm_kwargs = norm_kwargs self.drop_connect_rate = drop_connect_rate self.in_chs = None self.block_idx = 0 self.block_count = 0 def _round_channels(self, chs): return round_channels(chs, self.channel_multiplier, self.channel_divisor, self.channel_min) def _make_block(self, ba): bt = ba.pop('block_type') ba['in_chs'] = self.in_chs ba['out_chs'] = self._round_channels(ba['out_chs']) if 'fake_in_chs' in ba and ba['fake_in_chs']: ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs']) ba['norm_layer'] = self.norm_layer ba['norm_kwargs'] = self.norm_kwargs ba['pad_type'] = self.pad_type ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer assert ba['act_layer'] is not None if bt == 'ir': ba['drop_connect_rate'] = self.drop_connect_rate * self.block_idx / self.block_count ba['se_kwargs'] = self.se_kwargs if ba.get('num_experts', 0) > 0: block = CondConvResidual(**ba) else: block = InvertedResidual(**ba) elif bt == 'ds' or bt == 'dsa': ba['drop_connect_rate'] = self.drop_connect_rate * self.block_idx / self.block_count ba['se_kwargs'] = self.se_kwargs block = DepthwiseSeparableConv(**ba) elif bt == 'er': ba['drop_connect_rate'] = self.drop_connect_rate * self.block_idx / self.block_count ba['se_kwargs'] = self.se_kwargs block = EdgeResidual(**ba) elif bt == 'cn': block = ConvBnAct(**ba) else: assert False, 'Uknkown block type (%s) while building model.' % bt self.in_chs = ba['out_chs'] return block def _make_stack(self, stack_args): blocks = [] for (i, ba) in enumerate(stack_args): if i >= 1: ba['stride'] = 1 block = self._make_block(ba) blocks.append(block) self.block_idx += 1 return nn.Sequential(*blocks) def __call__(self, in_chs, block_args): self.in_chs = in_chs self.block_count = sum([len(x) for x in block_args]) self.block_idx = 0 blocks = [] for (stack_idx, stack) in enumerate(block_args): assert isinstance(stack, list) stack = self._make_stack(stack) blocks.append(stack) return blocks def _parse_ksize(ss): if ss.isdigit(): return int(ss) else: return [int(k) for k in ss.split('.')] def _decode_block_str(block_str): assert isinstance(block_str, str) ops = block_str.split('_') block_type = ops[0] ops = ops[1:] options = {} noskip = False for op in ops: if op == 'noskip': noskip = True elif op.startswith('n'): key = op[0] v = op[1:] if v == 're': value = get_act_layer('relu') elif v == 'r6': value = get_act_layer('relu6') elif v == 'hs': value = get_act_layer('hard_swish') elif v == 'sw': value = get_act_layer('swish') else: continue options[key] = value else: splits = re.split('(\\d.*)', op) if len(splits) >= 2: (key, value) = splits[:2] options[key] = value act_layer = options['n'] if 'n' in options else None exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1 pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1 fake_in_chs = int(options['fc']) if 'fc' in options else 0 num_repeat = int(options['r']) if block_type == 'ir': block_args = dict(block_type=block_type, dw_kernel_size=_parse_ksize(options['k']), exp_kernel_size=exp_kernel_size, pw_kernel_size=pw_kernel_size, out_chs=int(options['c']), exp_ratio=float(options['e']), se_ratio=float(options['se']) if 'se' in options else None, stride=int(options['s']), act_layer=act_layer, noskip=noskip) if 'cc' in options: block_args['num_experts'] = int(options['cc']) elif block_type == 'ds' or block_type == 'dsa': block_args = dict(block_type=block_type, dw_kernel_size=_parse_ksize(options['k']), pw_kernel_size=pw_kernel_size, out_chs=int(options['c']), se_ratio=float(options['se']) if 'se' in options else None, stride=int(options['s']), act_layer=act_layer, pw_act=block_type == 'dsa', noskip=block_type == 'dsa' or noskip) elif block_type == 'er': block_args = dict(block_type=block_type, exp_kernel_size=_parse_ksize(options['k']), pw_kernel_size=pw_kernel_size, out_chs=int(options['c']), exp_ratio=float(options['e']), fake_in_chs=fake_in_chs, se_ratio=float(options['se']) if 'se' in options else None, stride=int(options['s']), act_layer=act_layer, noskip=noskip) elif block_type == 'cn': block_args = dict(block_type=block_type, kernel_size=int(options['k']), out_chs=int(options['c']), stride=int(options['s']), act_layer=act_layer) else: assert False, 'Unknown block type (%s)' % block_type return (block_args, num_repeat) def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): num_repeat = sum(repeats) if depth_trunc == 'round': num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) else: num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier)) repeats_scaled = [] for r in repeats[::-1]: rs = max(1, round(r / num_repeat * num_repeat_scaled)) repeats_scaled.append(rs) num_repeat -= r num_repeat_scaled -= rs repeats_scaled = repeats_scaled[::-1] sa_scaled = [] for (ba, rep) in zip(stack_args, repeats_scaled): sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) return sa_scaled def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1, fix_first_last=False): arch_args = [] for (stack_idx, block_strings) in enumerate(arch_def): assert isinstance(block_strings, list) stack_args = [] repeats = [] for block_str in block_strings: assert isinstance(block_str, str) (ba, rep) = _decode_block_str(block_str) if ba.get('num_experts', 0) > 0 and experts_multiplier > 1: ba['num_experts'] *= experts_multiplier stack_args.append(ba) repeats.append(rep) if fix_first_last and (stack_idx == 0 or stack_idx == len(arch_def) - 1): arch_args.append(_scale_stage_depth(stack_args, repeats, 1.0, depth_trunc)) else: arch_args.append(_scale_stage_depth(stack_args, repeats, depth_multiplier, depth_trunc)) return arch_args def initialize_weight_goog(m, n='', fix_group_fanout=True): if isinstance(m, CondConv2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels if fix_group_fanout: fan_out //= m.groups init_weight_fn = get_condconv_initializer(lambda w: w.data.normal_(0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape) init_weight_fn(m.weight) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.Conv2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels if fix_group_fanout: fan_out //= m.groups m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1.0) m.bias.data.zero_() elif isinstance(m, nn.Linear): fan_out = m.weight.size(0) fan_in = 0 if 'routing_fn' in n: fan_in = m.weight.size(1) init_range = 1.0 / math.sqrt(fan_in + fan_out) m.weight.data.uniform_(-init_range, init_range) m.bias.data.zero_() def initialize_weight_default(m, n=''): if isinstance(m, CondConv2d): init_fn = get_condconv_initializer(partial(nn.init.kaiming_normal_, mode='fan_out', nonlinearity='relu'), m.num_experts, m.weight_shape) init_fn(m.weight) elif isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1.0) m.bias.data.zero_() elif isinstance(m, nn.Linear): nn.init.kaiming_uniform_(m.weight, mode='fan_in', nonlinearity='linear') # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/gen_efficientnet.py """""" import torch.nn as nn import torch.nn.functional as F from .config import layer_config_kwargs, is_scriptable from .conv2d_layers import select_conv2d from .helpers import load_pretrained from .efficientnet_builder import * __all__ = ['GenEfficientNet', 'mnasnet_050', 'mnasnet_075', 'mnasnet_100', 'mnasnet_b1', 'mnasnet_140', 'semnasnet_050', 'semnasnet_075', 'semnasnet_100', 'mnasnet_a1', 'semnasnet_140', 'mnasnet_small', 'mobilenetv2_100', 'mobilenetv2_140', 'mobilenetv2_110d', 'mobilenetv2_120d', 'fbnetc_100', 'spnasnet_100', 'efficientnet_b0', 'efficientnet_b1', 'efficientnet_b2', 'efficientnet_b3', 'efficientnet_b4', 'efficientnet_b5', 'efficientnet_b6', 'efficientnet_b7', 'efficientnet_b8', 'efficientnet_l2', 'efficientnet_es', 'efficientnet_em', 'efficientnet_el', 'efficientnet_cc_b0_4e', 'efficientnet_cc_b0_8e', 'efficientnet_cc_b1_8e', 'efficientnet_lite0', 'efficientnet_lite1', 'efficientnet_lite2', 'efficientnet_lite3', 'efficientnet_lite4', 'tf_efficientnet_b0', 'tf_efficientnet_b1', 'tf_efficientnet_b2', 'tf_efficientnet_b3', 'tf_efficientnet_b4', 'tf_efficientnet_b5', 'tf_efficientnet_b6', 'tf_efficientnet_b7', 'tf_efficientnet_b8', 'tf_efficientnet_b0_ap', 'tf_efficientnet_b1_ap', 'tf_efficientnet_b2_ap', 'tf_efficientnet_b3_ap', 'tf_efficientnet_b4_ap', 'tf_efficientnet_b5_ap', 'tf_efficientnet_b6_ap', 'tf_efficientnet_b7_ap', 'tf_efficientnet_b8_ap', 'tf_efficientnet_b0_ns', 'tf_efficientnet_b1_ns', 'tf_efficientnet_b2_ns', 'tf_efficientnet_b3_ns', 'tf_efficientnet_b4_ns', 'tf_efficientnet_b5_ns', 'tf_efficientnet_b6_ns', 'tf_efficientnet_b7_ns', 'tf_efficientnet_l2_ns', 'tf_efficientnet_l2_ns_475', 'tf_efficientnet_es', 'tf_efficientnet_em', 'tf_efficientnet_el', 'tf_efficientnet_cc_b0_4e', 'tf_efficientnet_cc_b0_8e', 'tf_efficientnet_cc_b1_8e', 'tf_efficientnet_lite0', 'tf_efficientnet_lite1', 'tf_efficientnet_lite2', 'tf_efficientnet_lite3', 'tf_efficientnet_lite4', 'mixnet_s', 'mixnet_m', 'mixnet_l', 'mixnet_xl', 'tf_mixnet_s', 'tf_mixnet_m', 'tf_mixnet_l'] model_urls = {'mnasnet_050': None, 'mnasnet_075': None, 'mnasnet_100': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth', 'mnasnet_140': None, 'mnasnet_small': None, 'semnasnet_050': None, 'semnasnet_075': None, 'semnasnet_100': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth', 'semnasnet_140': None, 'mobilenetv2_100': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth', 'mobilenetv2_110d': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth', 'mobilenetv2_120d': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth', 'mobilenetv2_140': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth', 'fbnetc_100': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth', 'spnasnet_100': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth', 'efficientnet_b0': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth', 'efficientnet_b1': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth', 'efficientnet_b2': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth', 'efficientnet_b3': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth', 'efficientnet_b4': None, 'efficientnet_b5': None, 'efficientnet_b6': None, 'efficientnet_b7': None, 'efficientnet_b8': None, 'efficientnet_l2': None, 'efficientnet_es': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth', 'efficientnet_em': None, 'efficientnet_el': None, 'efficientnet_cc_b0_4e': None, 'efficientnet_cc_b0_8e': None, 'efficientnet_cc_b1_8e': None, 'efficientnet_lite0': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth', 'efficientnet_lite1': None, 'efficientnet_lite2': None, 'efficientnet_lite3': None, 'efficientnet_lite4': None, 'tf_efficientnet_b0': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth', 'tf_efficientnet_b1': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth', 'tf_efficientnet_b2': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth', 'tf_efficientnet_b3': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth', 'tf_efficientnet_b4': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth', 'tf_efficientnet_b5': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth', 'tf_efficientnet_b6': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth', 'tf_efficientnet_b7': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth', 'tf_efficientnet_b8': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth', 'tf_efficientnet_b0_ap': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth', 'tf_efficientnet_b1_ap': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth', 'tf_efficientnet_b2_ap': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth', 'tf_efficientnet_b3_ap': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth', 'tf_efficientnet_b4_ap': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth', 'tf_efficientnet_b5_ap': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth', 'tf_efficientnet_b6_ap': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth', 'tf_efficientnet_b7_ap': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth', 'tf_efficientnet_b8_ap': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth', 'tf_efficientnet_b0_ns': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth', 'tf_efficientnet_b1_ns': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth', 'tf_efficientnet_b2_ns': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth', 'tf_efficientnet_b3_ns': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth', 'tf_efficientnet_b4_ns': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth', 'tf_efficientnet_b5_ns': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth', 'tf_efficientnet_b6_ns': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth', 'tf_efficientnet_b7_ns': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth', 'tf_efficientnet_l2_ns_475': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth', 'tf_efficientnet_l2_ns': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth', 'tf_efficientnet_es': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth', 'tf_efficientnet_em': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth', 'tf_efficientnet_el': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth', 'tf_efficientnet_cc_b0_4e': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth', 'tf_efficientnet_cc_b0_8e': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth', 'tf_efficientnet_cc_b1_8e': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth', 'tf_efficientnet_lite0': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth', 'tf_efficientnet_lite1': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth', 'tf_efficientnet_lite2': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth', 'tf_efficientnet_lite3': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth', 'tf_efficientnet_lite4': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth', 'mixnet_s': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth', 'mixnet_m': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth', 'mixnet_l': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth', 'mixnet_xl': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth', 'tf_mixnet_s': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth', 'tf_mixnet_m': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth', 'tf_mixnet_l': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth'} class GenEfficientNet(nn.Module): def __init__(self, block_args, num_classes=1000, in_chans=3, num_features=1280, stem_size=32, fix_stem=False, channel_multiplier=1.0, channel_divisor=8, channel_min=None, pad_type='', act_layer=nn.ReLU, drop_rate=0.0, drop_connect_rate=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, weight_init='goog'): super(GenEfficientNet, self).__init__() self.drop_rate = drop_rate if not fix_stem: stem_size = round_channels(stem_size, channel_multiplier, channel_divisor, channel_min) self.conv_stem = select_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) self.bn1 = norm_layer(stem_size, **norm_kwargs) self.act1 = act_layer(inplace=True) in_chs = stem_size builder = EfficientNetBuilder(channel_multiplier, channel_divisor, channel_min, pad_type, act_layer, se_kwargs, norm_layer, norm_kwargs, drop_connect_rate) self.blocks = nn.Sequential(*builder(in_chs, block_args)) in_chs = builder.in_chs self.conv_head = select_conv2d(in_chs, num_features, 1, padding=pad_type) self.bn2 = norm_layer(num_features, **norm_kwargs) self.act2 = act_layer(inplace=True) self.global_pool = nn.AdaptiveAvgPool2d(1) self.classifier = nn.Linear(num_features, num_classes) for (n, m) in self.named_modules(): if weight_init == 'goog': initialize_weight_goog(m, n) else: initialize_weight_default(m, n) def features(self, x): x = self.conv_stem(x) x = self.bn1(x) x = self.act1(x) x = self.blocks(x) x = self.conv_head(x) x = self.bn2(x) x = self.act2(x) return x def as_sequential(self): layers = [self.conv_stem, self.bn1, self.act1] layers.extend(self.blocks) layers.extend([self.conv_head, self.bn2, self.act2, self.global_pool, nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier]) return nn.Sequential(*layers) def forward(self, x): x = self.features(x) x = self.global_pool(x) x = x.flatten(1) if self.drop_rate > 0.0: x = F.dropout(x, p=self.drop_rate, training=self.training) return self.classifier(x) def _create_model(model_kwargs, variant, pretrained=False): as_sequential = model_kwargs.pop('as_sequential', False) model = GenEfficientNet(**model_kwargs) if pretrained: load_pretrained(model, model_urls[variant]) if as_sequential: model = model.as_sequential() return model def _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_e1_c16_noskip'], ['ir_r2_k3_s2_e6_c24'], ['ir_r3_k5_s2_e3_c40_se0.25'], ['ir_r4_k3_s2_e6_c80'], ['ir_r2_k3_s1_e6_c112_se0.25'], ['ir_r3_k5_s2_e6_c160_se0.25'], ['ir_r1_k3_s1_e6_c320']] with layer_config_kwargs(kwargs): model_kwargs = dict(block_args=decode_arch_def(arch_def), stem_size=32, channel_multiplier=channel_multiplier, act_layer=resolve_act_layer(kwargs, 'relu'), norm_kwargs=resolve_bn_args(kwargs), **kwargs) model = _create_model(model_kwargs, variant, pretrained) return model def _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_c16_noskip'], ['ir_r3_k3_s2_e3_c24'], ['ir_r3_k5_s2_e3_c40'], ['ir_r3_k5_s2_e6_c80'], ['ir_r2_k3_s1_e6_c96'], ['ir_r4_k5_s2_e6_c192'], ['ir_r1_k3_s1_e6_c320_noskip']] with layer_config_kwargs(kwargs): model_kwargs = dict(block_args=decode_arch_def(arch_def), stem_size=32, channel_multiplier=channel_multiplier, act_layer=resolve_act_layer(kwargs, 'relu'), norm_kwargs=resolve_bn_args(kwargs), **kwargs) model = _create_model(model_kwargs, variant, pretrained) return model def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_c8'], ['ir_r1_k3_s2_e3_c16'], ['ir_r2_k3_s2_e6_c16'], ['ir_r4_k5_s2_e6_c32_se0.25'], ['ir_r3_k3_s1_e6_c32_se0.25'], ['ir_r3_k5_s2_e6_c88_se0.25'], ['ir_r1_k3_s1_e6_c144']] with layer_config_kwargs(kwargs): model_kwargs = dict(block_args=decode_arch_def(arch_def), stem_size=8, channel_multiplier=channel_multiplier, act_layer=resolve_act_layer(kwargs, 'relu'), norm_kwargs=resolve_bn_args(kwargs), **kwargs) model = _create_model(model_kwargs, variant, pretrained) return model def _gen_mobilenet_v2(variant, channel_multiplier=1.0, depth_multiplier=1.0, fix_stem_head=False, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_c16'], ['ir_r2_k3_s2_e6_c24'], ['ir_r3_k3_s2_e6_c32'], ['ir_r4_k3_s2_e6_c64'], ['ir_r3_k3_s1_e6_c96'], ['ir_r3_k3_s2_e6_c160'], ['ir_r1_k3_s1_e6_c320']] with layer_config_kwargs(kwargs): model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head), num_features=1280 if fix_stem_head else round_channels(1280, channel_multiplier, 8, None), stem_size=32, fix_stem=fix_stem_head, channel_multiplier=channel_multiplier, norm_kwargs=resolve_bn_args(kwargs), act_layer=nn.ReLU6, **kwargs) model = _create_model(model_kwargs, variant, pretrained) return model def _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ir_r1_k3_s1_e1_c16'], ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'], ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'], ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'], ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'], ['ir_r4_k5_s2_e6_c184'], ['ir_r1_k3_s1_e6_c352']] with layer_config_kwargs(kwargs): model_kwargs = dict(block_args=decode_arch_def(arch_def), stem_size=16, num_features=1984, channel_multiplier=channel_multiplier, act_layer=resolve_act_layer(kwargs, 'relu'), norm_kwargs=resolve_bn_args(kwargs), **kwargs) model = _create_model(model_kwargs, variant, pretrained) return model def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_c16_noskip'], ['ir_r3_k3_s2_e3_c24'], ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'], ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'], ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'], ['ir_r4_k5_s2_e6_c192'], ['ir_r1_k3_s1_e6_c320_noskip']] with layer_config_kwargs(kwargs): model_kwargs = dict(block_args=decode_arch_def(arch_def), stem_size=32, channel_multiplier=channel_multiplier, act_layer=resolve_act_layer(kwargs, 'relu'), norm_kwargs=resolve_bn_args(kwargs), **kwargs) model = _create_model(model_kwargs, variant, pretrained) return model def _gen_efficientnet(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], ['ir_r1_k3_s1_e6_c320_se0.25']] with layer_config_kwargs(kwargs): model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier), num_features=round_channels(1280, channel_multiplier, 8, None), stem_size=32, channel_multiplier=channel_multiplier, act_layer=resolve_act_layer(kwargs, 'swish'), norm_kwargs=resolve_bn_args(kwargs), **kwargs) model = _create_model(model_kwargs, variant, pretrained) return model def _gen_efficientnet_edge(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['er_r1_k3_s1_e4_c24_fc24_noskip'], ['er_r2_k3_s2_e8_c32'], ['er_r4_k3_s2_e8_c48'], ['ir_r5_k5_s2_e8_c96'], ['ir_r4_k5_s1_e8_c144'], ['ir_r2_k5_s2_e8_c192']] with layer_config_kwargs(kwargs): model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier), num_features=round_channels(1280, channel_multiplier, 8, None), stem_size=32, channel_multiplier=channel_multiplier, act_layer=resolve_act_layer(kwargs, 'relu'), norm_kwargs=resolve_bn_args(kwargs), **kwargs) model = _create_model(model_kwargs, variant, pretrained) return model def _gen_efficientnet_condconv(variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25_cc4'], ['ir_r4_k5_s2_e6_c192_se0.25_cc4'], ['ir_r1_k3_s1_e6_c320_se0.25_cc4']] with layer_config_kwargs(kwargs): model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier), num_features=round_channels(1280, channel_multiplier, 8, None), stem_size=32, channel_multiplier=channel_multiplier, act_layer=resolve_act_layer(kwargs, 'swish'), norm_kwargs=resolve_bn_args(kwargs), **kwargs) model = _create_model(model_kwargs, variant, pretrained) return model def _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_e1_c16'], ['ir_r2_k3_s2_e6_c24'], ['ir_r2_k5_s2_e6_c40'], ['ir_r3_k3_s2_e6_c80'], ['ir_r3_k5_s1_e6_c112'], ['ir_r4_k5_s2_e6_c192'], ['ir_r1_k3_s1_e6_c320']] with layer_config_kwargs(kwargs): model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True), num_features=1280, stem_size=32, fix_stem=True, channel_multiplier=channel_multiplier, act_layer=nn.ReLU6, norm_kwargs=resolve_bn_args(kwargs), **kwargs) model = _create_model(model_kwargs, variant, pretrained) return model def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_e1_c16'], ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw']] with layer_config_kwargs(kwargs): model_kwargs = dict(block_args=decode_arch_def(arch_def), num_features=1536, stem_size=16, channel_multiplier=channel_multiplier, act_layer=resolve_act_layer(kwargs, 'relu'), norm_kwargs=resolve_bn_args(kwargs), **kwargs) model = _create_model(model_kwargs, variant, pretrained) return model def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_e1_c24'], ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw']] with layer_config_kwargs(kwargs): model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), num_features=1536, stem_size=24, channel_multiplier=channel_multiplier, act_layer=resolve_act_layer(kwargs, 'relu'), norm_kwargs=resolve_bn_args(kwargs), **kwargs) model = _create_model(model_kwargs, variant, pretrained) return model def mnasnet_050(pretrained=False, **kwargs): model = _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs) return model def mnasnet_075(pretrained=False, **kwargs): model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs) return model def mnasnet_100(pretrained=False, **kwargs): model = _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs) return model def mnasnet_b1(pretrained=False, **kwargs): return mnasnet_100(pretrained, **kwargs) def mnasnet_140(pretrained=False, **kwargs): model = _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs) return model def semnasnet_050(pretrained=False, **kwargs): model = _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs) return model def semnasnet_075(pretrained=False, **kwargs): model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs) return model def semnasnet_100(pretrained=False, **kwargs): model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs) return model def mnasnet_a1(pretrained=False, **kwargs): return semnasnet_100(pretrained, **kwargs) def semnasnet_140(pretrained=False, **kwargs): model = _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs) return model def mnasnet_small(pretrained=False, **kwargs): model = _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs) return model def mobilenetv2_100(pretrained=False, **kwargs): model = _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs) return model def mobilenetv2_140(pretrained=False, **kwargs): model = _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs) return model def mobilenetv2_110d(pretrained=False, **kwargs): model = _gen_mobilenet_v2('mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs) return model def mobilenetv2_120d(pretrained=False, **kwargs): model = _gen_mobilenet_v2('mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs) return model def fbnetc_100(pretrained=False, **kwargs): if pretrained: kwargs['bn_eps'] = BN_EPS_TF_DEFAULT model = _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs) return model def spnasnet_100(pretrained=False, **kwargs): model = _gen_spnasnet('spnasnet_100', 1.0, pretrained=pretrained, **kwargs) return model def efficientnet_b0(pretrained=False, **kwargs): model = _gen_efficientnet('efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model def efficientnet_b1(pretrained=False, **kwargs): model = _gen_efficientnet('efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model def efficientnet_b2(pretrained=False, **kwargs): model = _gen_efficientnet('efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model def efficientnet_b3(pretrained=False, **kwargs): model = _gen_efficientnet('efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model def efficientnet_b4(pretrained=False, **kwargs): model = _gen_efficientnet('efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model def efficientnet_b5(pretrained=False, **kwargs): model = _gen_efficientnet('efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) return model def efficientnet_b6(pretrained=False, **kwargs): model = _gen_efficientnet('efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) return model def efficientnet_b7(pretrained=False, **kwargs): model = _gen_efficientnet('efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) return model def efficientnet_b8(pretrained=False, **kwargs): model = _gen_efficientnet('efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) return model def efficientnet_l2(pretrained=False, **kwargs): model = _gen_efficientnet('efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) return model def efficientnet_es(pretrained=False, **kwargs): model = _gen_efficientnet_edge('efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model def efficientnet_em(pretrained=False, **kwargs): model = _gen_efficientnet_edge('efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model def efficientnet_el(pretrained=False, **kwargs): model = _gen_efficientnet_edge('efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model def efficientnet_cc_b0_4e(pretrained=False, **kwargs): model = _gen_efficientnet_condconv('efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model def efficientnet_cc_b0_8e(pretrained=False, **kwargs): model = _gen_efficientnet_condconv('efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, pretrained=pretrained, **kwargs) return model def efficientnet_cc_b1_8e(pretrained=False, **kwargs): model = _gen_efficientnet_condconv('efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, pretrained=pretrained, **kwargs) return model def efficientnet_lite0(pretrained=False, **kwargs): model = _gen_efficientnet_lite('efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model def efficientnet_lite1(pretrained=False, **kwargs): model = _gen_efficientnet_lite('efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model def efficientnet_lite2(pretrained=False, **kwargs): model = _gen_efficientnet_lite('efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model def efficientnet_lite3(pretrained=False, **kwargs): model = _gen_efficientnet_lite('efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model def efficientnet_lite4(pretrained=False, **kwargs): model = _gen_efficientnet_lite('efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b0(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b1(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b2(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b3(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b4(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b5(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b6(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b7(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b8(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b0_ap(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b0_ap', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b1_ap(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b1_ap', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b2_ap(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b2_ap', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b3_ap(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b3_ap', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b4_ap(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b4_ap', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b5_ap(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b5_ap', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b6_ap(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b6_ap', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b7_ap(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b7_ap', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b8_ap(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b8_ap', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b0_ns(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b0_ns', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b1_ns(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b1_ns', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b2_ns(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b2_ns', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b3_ns(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b3_ns', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b4_ns(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b4_ns', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b5_ns(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b5_ns', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b6_ns(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b6_ns', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) return model def tf_efficientnet_b7_ns(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_b7_ns', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) return model def tf_efficientnet_l2_ns_475(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_l2_ns_475', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) return model def tf_efficientnet_l2_ns(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('tf_efficientnet_l2_ns', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) return model def tf_efficientnet_es(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_edge('tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model def tf_efficientnet_em(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_edge('tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model def tf_efficientnet_el(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_edge('tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model def tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_condconv('tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model def tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_condconv('tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, pretrained=pretrained, **kwargs) return model def tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_condconv('tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, pretrained=pretrained, **kwargs) return model def tf_efficientnet_lite0(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_lite('tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model def tf_efficientnet_lite1(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_lite('tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model def tf_efficientnet_lite2(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_lite('tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model def tf_efficientnet_lite3(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_lite('tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model def tf_efficientnet_lite4(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_lite('tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model def mixnet_s(pretrained=False, **kwargs): model = _gen_mixnet_s('mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model def mixnet_m(pretrained=False, **kwargs): model = _gen_mixnet_m('mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model def mixnet_l(pretrained=False, **kwargs): model = _gen_mixnet_m('mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) return model def mixnet_xl(pretrained=False, **kwargs): model = _gen_mixnet_m('mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model def mixnet_xxl(pretrained=False, **kwargs): model = _gen_mixnet_m('mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs) return model def tf_mixnet_s(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_mixnet_s('tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model def tf_mixnet_m(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_mixnet_m('tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model def tf_mixnet_l(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_mixnet_m('tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) return model # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/helpers.py """""" import torch import os from collections import OrderedDict try: from torch.hub import load_state_dict_from_url except ImportError: from torch.utils.model_zoo import load_url as load_state_dict_from_url def load_checkpoint(model, checkpoint_path): if checkpoint_path and os.path.isfile(checkpoint_path): print("=> Loading checkpoint '{}'".format(checkpoint_path)) checkpoint = torch.load(checkpoint_path) if isinstance(checkpoint, dict) and 'state_dict' in checkpoint: new_state_dict = OrderedDict() for (k, v) in checkpoint['state_dict'].items(): if k.startswith('module'): name = k[7:] else: name = k new_state_dict[name] = v model.load_state_dict(new_state_dict) else: model.load_state_dict(checkpoint) print("=> Loaded checkpoint '{}'".format(checkpoint_path)) else: print("=> Error: No checkpoint found at '{}'".format(checkpoint_path)) raise FileNotFoundError() def load_pretrained(model, url, filter_fn=None, strict=True): if not url: print('=> Warning: Pretrained model URL is empty, using random initialization.') return state_dict = load_state_dict_from_url(url, progress=False, map_location='cpu') input_conv = 'conv_stem' classifier = 'classifier' in_chans = getattr(model, input_conv).weight.shape[1] num_classes = getattr(model, classifier).weight.shape[0] input_conv_weight = input_conv + '.weight' pretrained_in_chans = state_dict[input_conv_weight].shape[1] if in_chans != pretrained_in_chans: if in_chans == 1: print('=> Converting pretrained input conv {} from {} to 1 channel'.format(input_conv_weight, pretrained_in_chans)) conv1_weight = state_dict[input_conv_weight] state_dict[input_conv_weight] = conv1_weight.sum(dim=1, keepdim=True) else: print('=> Discarding pretrained input conv {} since input channel count != {}'.format(input_conv_weight, pretrained_in_chans)) del state_dict[input_conv_weight] strict = False classifier_weight = classifier + '.weight' pretrained_num_classes = state_dict[classifier_weight].shape[0] if num_classes != pretrained_num_classes: print('=> Discarding pretrained classifier since num_classes != {}'.format(pretrained_num_classes)) del state_dict[classifier_weight] del state_dict[classifier + '.bias'] strict = False if filter_fn is not None: state_dict = filter_fn(state_dict) model.load_state_dict(state_dict, strict=strict) # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/mobilenetv3.py """""" import torch.nn as nn import torch.nn.functional as F from .activations import get_act_fn, get_act_layer, HardSwish from .config import layer_config_kwargs from .conv2d_layers import select_conv2d from .helpers import load_pretrained from .efficientnet_builder import * __all__ = ['mobilenetv3_rw', 'mobilenetv3_large_075', 'mobilenetv3_large_100', 'mobilenetv3_large_minimal_100', 'mobilenetv3_small_075', 'mobilenetv3_small_100', 'mobilenetv3_small_minimal_100', 'tf_mobilenetv3_large_075', 'tf_mobilenetv3_large_100', 'tf_mobilenetv3_large_minimal_100', 'tf_mobilenetv3_small_075', 'tf_mobilenetv3_small_100', 'tf_mobilenetv3_small_minimal_100'] model_urls = {'mobilenetv3_rw': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth', 'mobilenetv3_large_075': None, 'mobilenetv3_large_100': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth', 'mobilenetv3_large_minimal_100': None, 'mobilenetv3_small_075': None, 'mobilenetv3_small_100': None, 'mobilenetv3_small_minimal_100': None, 'tf_mobilenetv3_large_075': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth', 'tf_mobilenetv3_large_100': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth', 'tf_mobilenetv3_large_minimal_100': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth', 'tf_mobilenetv3_small_075': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth', 'tf_mobilenetv3_small_100': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth', 'tf_mobilenetv3_small_minimal_100': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth'} class MobileNetV3(nn.Module): def __init__(self, block_args, num_classes=1000, in_chans=3, stem_size=16, num_features=1280, head_bias=True, channel_multiplier=1.0, pad_type='', act_layer=HardSwish, drop_rate=0.0, drop_connect_rate=0.0, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, weight_init='goog'): super(MobileNetV3, self).__init__() self.drop_rate = drop_rate stem_size = round_channels(stem_size, channel_multiplier) self.conv_stem = select_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) self.bn1 = nn.BatchNorm2d(stem_size, **norm_kwargs) self.act1 = act_layer(inplace=True) in_chs = stem_size builder = EfficientNetBuilder(channel_multiplier, pad_type=pad_type, act_layer=act_layer, se_kwargs=se_kwargs, norm_layer=norm_layer, norm_kwargs=norm_kwargs, drop_connect_rate=drop_connect_rate) self.blocks = nn.Sequential(*builder(in_chs, block_args)) in_chs = builder.in_chs self.global_pool = nn.AdaptiveAvgPool2d(1) self.conv_head = select_conv2d(in_chs, num_features, 1, padding=pad_type, bias=head_bias) self.act2 = act_layer(inplace=True) self.classifier = nn.Linear(num_features, num_classes) for m in self.modules(): if weight_init == 'goog': initialize_weight_goog(m) else: initialize_weight_default(m) def as_sequential(self): layers = [self.conv_stem, self.bn1, self.act1] layers.extend(self.blocks) layers.extend([self.global_pool, self.conv_head, self.act2, nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier]) return nn.Sequential(*layers) def features(self, x): x = self.conv_stem(x) x = self.bn1(x) x = self.act1(x) x = self.blocks(x) x = self.global_pool(x) x = self.conv_head(x) x = self.act2(x) return x def forward(self, x): x = self.features(x) x = x.flatten(1) if self.drop_rate > 0.0: x = F.dropout(x, p=self.drop_rate, training=self.training) return self.classifier(x) def _create_model(model_kwargs, variant, pretrained=False): as_sequential = model_kwargs.pop('as_sequential', False) model = MobileNetV3(**model_kwargs) if pretrained and model_urls[variant]: load_pretrained(model, model_urls[variant]) if as_sequential: model = model.as_sequential() return model def _gen_mobilenet_v3_rw(variant, channel_multiplier=1.0, pretrained=False, **kwargs): arch_def = [['ds_r1_k3_s1_e1_c16_nre_noskip'], ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], ['ir_r3_k5_s2_e3_c40_se0.25_nre'], ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], ['ir_r2_k3_s1_e6_c112_se0.25'], ['ir_r3_k5_s2_e6_c160_se0.25'], ['cn_r1_k1_s1_c960']] with layer_config_kwargs(kwargs): model_kwargs = dict(block_args=decode_arch_def(arch_def), head_bias=False, channel_multiplier=channel_multiplier, act_layer=resolve_act_layer(kwargs, 'hard_swish'), se_kwargs=dict(gate_fn=get_act_fn('hard_sigmoid'), reduce_mid=True), norm_kwargs=resolve_bn_args(kwargs), **kwargs) model = _create_model(model_kwargs, variant, pretrained) return model def _gen_mobilenet_v3(variant, channel_multiplier=1.0, pretrained=False, **kwargs): if 'small' in variant: num_features = 1024 if 'minimal' in variant: act_layer = 'relu' arch_def = [['ds_r1_k3_s2_e1_c16'], ['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'], ['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'], ['ir_r2_k3_s1_e3_c48'], ['ir_r3_k3_s2_e6_c96'], ['cn_r1_k1_s1_c576']] else: act_layer = 'hard_swish' arch_def = [['ds_r1_k3_s2_e1_c16_se0.25_nre'], ['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'], ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'], ['ir_r2_k5_s1_e3_c48_se0.25'], ['ir_r3_k5_s2_e6_c96_se0.25'], ['cn_r1_k1_s1_c576']] else: num_features = 1280 if 'minimal' in variant: act_layer = 'relu' arch_def = [['ds_r1_k3_s1_e1_c16'], ['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'], ['ir_r3_k3_s2_e3_c40'], ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], ['ir_r2_k3_s1_e6_c112'], ['ir_r3_k3_s2_e6_c160'], ['cn_r1_k1_s1_c960']] else: act_layer = 'hard_swish' arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], ['ir_r3_k5_s2_e3_c40_se0.25_nre'], ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], ['ir_r2_k3_s1_e6_c112_se0.25'], ['ir_r3_k5_s2_e6_c160_se0.25'], ['cn_r1_k1_s1_c960']] with layer_config_kwargs(kwargs): model_kwargs = dict(block_args=decode_arch_def(arch_def), num_features=num_features, stem_size=16, channel_multiplier=channel_multiplier, act_layer=resolve_act_layer(kwargs, act_layer), se_kwargs=dict(act_layer=get_act_layer('relu'), gate_fn=get_act_fn('hard_sigmoid'), reduce_mid=True, divisor=8), norm_kwargs=resolve_bn_args(kwargs), **kwargs) model = _create_model(model_kwargs, variant, pretrained) return model def mobilenetv3_rw(pretrained=False, **kwargs): if pretrained: kwargs['bn_eps'] = BN_EPS_TF_DEFAULT model = _gen_mobilenet_v3_rw('mobilenetv3_rw', 1.0, pretrained=pretrained, **kwargs) return model def mobilenetv3_large_075(pretrained=False, **kwargs): model = _gen_mobilenet_v3('mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) return model def mobilenetv3_large_100(pretrained=False, **kwargs): model = _gen_mobilenet_v3('mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) return model def mobilenetv3_large_minimal_100(pretrained=False, **kwargs): model = _gen_mobilenet_v3('mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs) return model def mobilenetv3_small_075(pretrained=False, **kwargs): model = _gen_mobilenet_v3('mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) return model def mobilenetv3_small_100(pretrained=False, **kwargs): model = _gen_mobilenet_v3('mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) return model def mobilenetv3_small_minimal_100(pretrained=False, **kwargs): model = _gen_mobilenet_v3('mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs) return model def tf_mobilenetv3_large_075(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_mobilenet_v3('tf_mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) return model def tf_mobilenetv3_large_100(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_mobilenet_v3('tf_mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) return model def tf_mobilenetv3_large_minimal_100(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_mobilenet_v3('tf_mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs) return model def tf_mobilenetv3_small_075(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_mobilenet_v3('tf_mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) return model def tf_mobilenetv3_small_100(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_mobilenet_v3('tf_mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) return model def tf_mobilenetv3_small_minimal_100(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_mobilenet_v3('tf_mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs) return model # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/geffnet/model_factory.py from .config import set_layer_config from .helpers import load_checkpoint from .gen_efficientnet import * from .mobilenetv3 import * def create_model(model_name='mnasnet_100', pretrained=None, num_classes=1000, in_chans=3, checkpoint_path='', **kwargs): model_kwargs = dict(num_classes=num_classes, in_chans=in_chans, pretrained=pretrained, **kwargs) if model_name in globals(): create_fn = globals()[model_name] model = create_fn(**model_kwargs) else: raise RuntimeError('Unknown model (%s)' % model_name) if checkpoint_path and (not pretrained): load_checkpoint(model, checkpoint_path) return model # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/onnx_export.py """""" import argparse import torch import numpy as np import onnx import geffnet parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation') parser.add_argument('output', metavar='ONNX_FILE', help='output model filename') parser.add_argument('--model', '-m', metavar='MODEL', default='mobilenetv3_large_100', help='model architecture (default: mobilenetv3_large_100)') parser.add_argument('--opset', type=int, default=10, help='ONNX opset to use (default: 10)') parser.add_argument('--keep-init', action='store_true', default=False, help='Keep initializers as input. Needed for Caffe2 compatible export in newer PyTorch/ONNX.') parser.add_argument('--aten-fallback', action='store_true', default=False, help='Fallback to ATEN ops. Helps fix AdaptiveAvgPool issue with Caffe2 in newer PyTorch/ONNX.') parser.add_argument('--dynamic-size', action='store_true', default=False, help='Export model width dynamic width/height. Not recommended for "tf" models with SAME padding.') parser.add_argument('-b', '--batch-size', default=1, type=int, metavar='N', help='mini-batch size (default: 1)') parser.add_argument('--img-size', default=None, type=int, metavar='N', help='Input image dimension, uses model default if empty') parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', help='Override mean pixel value of dataset') parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', help='Override std deviation of of dataset') parser.add_argument('--num-classes', type=int, default=1000, help='Number classes in dataset') parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', help='path to checkpoint (default: none)') def main(): args = parser.parse_args() args.pretrained = True if args.checkpoint: args.pretrained = False print('==> Creating PyTorch {} model'.format(args.model)) model = geffnet.create_model(args.model, num_classes=args.num_classes, in_chans=3, pretrained=args.pretrained, checkpoint_path=args.checkpoint, exportable=True) model.eval() example_input = torch.randn((args.batch_size, 3, args.img_size or 224, args.img_size or 224), requires_grad=True) model(example_input) print("==> Exporting model to ONNX format at '{}'".format(args.output)) input_names = ['input0'] output_names = ['output0'] dynamic_axes = {'input0': {0: 'batch'}, 'output0': {0: 'batch'}} if args.dynamic_size: dynamic_axes['input0'][2] = 'height' dynamic_axes['input0'][3] = 'width' if args.aten_fallback: export_type = torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK else: export_type = torch.onnx.OperatorExportTypes.ONNX torch_out = torch.onnx._export(model, example_input, args.output, export_params=True, verbose=True, input_names=input_names, output_names=output_names, keep_initializers_as_inputs=args.keep_init, dynamic_axes=dynamic_axes, opset_version=args.opset, operator_export_type=export_type) print("==> Loading and checking exported model from '{}'".format(args.output)) onnx_model = onnx.load(args.output) onnx.checker.check_model(onnx_model) print('==> Passed') if args.keep_init and args.aten_fallback: import caffe2.python.onnx.backend as onnx_caffe2 print('==> Loading model into Caffe2 backend and comparing forward pass.'.format(args.output)) caffe2_backend = onnx_caffe2.prepare(onnx_model) B = {onnx_model.graph.input[0].name: x.data.numpy()} c2_out = caffe2_backend.run(B)[0] np.testing.assert_almost_equal(torch_out.data.numpy(), c2_out, decimal=5) print('==> Passed') if __name__ == '__main__': main() # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/onnx_optimize.py """""" import argparse import warnings import onnx from onnx import optimizer parser = argparse.ArgumentParser(description='Optimize ONNX model') parser.add_argument('model', help='The ONNX model') parser.add_argument('--output', required=True, help='The optimized model output filename') def traverse_graph(graph, prefix=''): content = [] indent = prefix + ' ' graphs = [] num_nodes = 0 for node in graph.node: (pn, gs) = onnx.helper.printable_node(node, indent, subgraphs=True) assert isinstance(gs, list) content.append(pn) graphs.extend(gs) num_nodes += 1 for g in graphs: (g_count, g_str) = traverse_graph(g) content.append('\n' + g_str) num_nodes += g_count return (num_nodes, '\n'.join(content)) def main(): args = parser.parse_args() onnx_model = onnx.load(args.model) (num_original_nodes, original_graph_str) = traverse_graph(onnx_model.graph) passes = ['eliminate_identity', 'eliminate_nop_dropout', 'eliminate_nop_pad', 'eliminate_nop_transpose', 'eliminate_unused_initializer', 'extract_constant_to_initializer', 'fuse_add_bias_into_conv', 'fuse_bn_into_conv', 'fuse_consecutive_concats', 'fuse_consecutive_reduce_unsqueeze', 'fuse_consecutive_squeezes', 'fuse_consecutive_transposes', 'fuse_pad_into_conv'] warnings.warn("I've had issues with optimizer in recent versions of PyTorch / ONNX.Try onnxruntime optimization if this doesn't work.") optimized_model = optimizer.optimize(onnx_model, passes) (num_optimized_nodes, optimzied_graph_str) = traverse_graph(optimized_model.graph) print('==> The model after optimization:\n{}\n'.format(optimzied_graph_str)) print('==> The optimized model has {} nodes, the original had {}.'.format(num_optimized_nodes, num_original_nodes)) onnx.save(optimized_model, args.output) if __name__ == '__main__': main() # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/onnx_to_caffe.py import argparse import onnx from caffe2.python.onnx.backend import Caffe2Backend parser = argparse.ArgumentParser(description='Convert ONNX to Caffe2') parser.add_argument('model', help='The ONNX model') parser.add_argument('--c2-prefix', required=True, help='The output file prefix for the caffe2 model init and predict file. ') def main(): args = parser.parse_args() onnx_model = onnx.load(args.model) (caffe2_init, caffe2_predict) = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model) caffe2_init_str = caffe2_init.SerializeToString() with open(args.c2_prefix + '.init.pb', 'wb') as f: f.write(caffe2_init_str) caffe2_predict_str = caffe2_predict.SerializeToString() with open(args.c2_prefix + '.predict.pb', 'wb') as f: f.write(caffe2_predict_str) if __name__ == '__main__': main() # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/onnx_validate.py """""" import argparse import numpy as np import onnxruntime from data import create_loader, resolve_data_config, Dataset from utils import AverageMeter import time parser = argparse.ArgumentParser(description='Caffe2 ImageNet Validation') parser.add_argument('data', metavar='DIR', help='path to dataset') parser.add_argument('--onnx-input', default='', type=str, metavar='PATH', help='path to onnx model/weights file') parser.add_argument('--onnx-output-opt', default='', type=str, metavar='PATH', help='path to output optimized onnx graph') parser.add_argument('--profile', action='store_true', default=False, help='Enable profiler output.') parser.add_argument('-j', '--workers', default=2, type=int, metavar='N', help='number of data loading workers (default: 2)') parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)') parser.add_argument('--img-size', default=None, type=int, metavar='N', help='Input image dimension, uses model default if empty') parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', help='Override mean pixel value of dataset') parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', help='Override std deviation of of dataset') parser.add_argument('--crop-pct', type=float, default=None, metavar='PCT', help='Override default crop pct of 0.875') parser.add_argument('--interpolation', default='', type=str, metavar='NAME', help='Image resize interpolation type (overrides model)') parser.add_argument('--tf-preprocessing', dest='tf_preprocessing', action='store_true', help='use tensorflow mnasnet preporcessing') parser.add_argument('--print-freq', '-p', default=10, type=int, metavar='N', help='print frequency (default: 10)') def main(): args = parser.parse_args() args.gpu_id = 0 sess_options = onnxruntime.SessionOptions() sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL if args.profile: sess_options.enable_profiling = True if args.onnx_output_opt: sess_options.optimized_model_filepath = args.onnx_output_opt session = onnxruntime.InferenceSession(args.onnx_input, sess_options) data_config = resolve_data_config(None, args) loader = create_loader(Dataset(args.data, load_bytes=args.tf_preprocessing), input_size=data_config['input_size'], batch_size=args.batch_size, use_prefetcher=False, interpolation=data_config['interpolation'], mean=data_config['mean'], std=data_config['std'], num_workers=args.workers, crop_pct=data_config['crop_pct'], tensorflow_preprocessing=args.tf_preprocessing) input_name = session.get_inputs()[0].name batch_time = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() end = time.time() for (i, (input, target)) in enumerate(loader): output = session.run([], {input_name: input.data.numpy()}) output = output[0] (prec1, prec5) = accuracy_np(output, target.numpy()) top1.update(prec1.item(), input.size(0)) top5.update(prec5.item(), input.size(0)) batch_time.update(time.time() - end) end = time.time() if i % args.print_freq == 0: print('Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f}, {rate_avg:.3f}/s, {ms_avg:.3f} ms/sample) \tPrec@1 {top1.val:.3f} ({top1.avg:.3f})\tPrec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(i, len(loader), batch_time=batch_time, rate_avg=input.size(0) / batch_time.avg, ms_avg=100 * batch_time.avg / input.size(0), top1=top1, top5=top5)) print(' * Prec@1 {top1.avg:.3f} ({top1a:.3f}) Prec@5 {top5.avg:.3f} ({top5a:.3f})'.format(top1=top1, top1a=100 - top1.avg, top5=top5, top5a=100.0 - top5.avg)) def accuracy_np(output, target): max_indices = np.argsort(output, axis=1)[:, ::-1] top5 = 100 * np.equal(max_indices[:, :5], target[:, np.newaxis]).sum(axis=1).mean() top1 = 100 * np.equal(max_indices[:, 0], target).mean() return (top1, top5) if __name__ == '__main__': main() # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/utils.py import os class AverageMeter: def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def accuracy(output, target, topk=(1,)): maxk = max(topk) batch_size = target.size(0) (_, pred) = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].reshape(-1).float().sum(0) res.append(correct_k.mul_(100.0 / batch_size)) return res def get_outdir(path, *paths, inc=False): outdir = os.path.join(path, *paths) if not os.path.exists(outdir): os.makedirs(outdir) elif inc: count = 1 outdir_inc = outdir + '-' + str(count) while os.path.exists(outdir_inc): count = count + 1 outdir_inc = outdir + '-' + str(count) assert count < 100 outdir = outdir_inc os.makedirs(outdir) return outdir # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/efficientnet_repo/validate.py from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import time import torch import torch.nn as nn import torch.nn.parallel from contextlib import suppress import geffnet from data import Dataset, create_loader, resolve_data_config from utils import accuracy, AverageMeter has_native_amp = False try: if getattr(torch.cuda.amp, 'autocast') is not None: has_native_amp = True except AttributeError: pass torch.backends.cudnn.benchmark = True parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation') parser.add_argument('data', metavar='DIR', help='path to dataset') parser.add_argument('--model', '-m', metavar='MODEL', default='spnasnet1_00', help='model architecture (default: dpn92)') parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 2)') parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)') parser.add_argument('--img-size', default=None, type=int, metavar='N', help='Input image dimension, uses model default if empty') parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', help='Override mean pixel value of dataset') parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', help='Override std deviation of of dataset') parser.add_argument('--crop-pct', type=float, default=None, metavar='PCT', help='Override default crop pct of 0.875') parser.add_argument('--interpolation', default='', type=str, metavar='NAME', help='Image resize interpolation type (overrides model)') parser.add_argument('--num-classes', type=int, default=1000, help='Number classes in dataset') parser.add_argument('--print-freq', '-p', default=10, type=int, metavar='N', help='print frequency (default: 10)') parser.add_argument('--checkpoint', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model') parser.add_argument('--torchscript', dest='torchscript', action='store_true', help='convert model torchscript for inference') parser.add_argument('--num-gpu', type=int, default=1, help='Number of GPUS to use') parser.add_argument('--tf-preprocessing', dest='tf_preprocessing', action='store_true', help='use tensorflow mnasnet preporcessing') parser.add_argument('--no-cuda', dest='no_cuda', action='store_true', help='') parser.add_argument('--channels-last', action='store_true', default=False, help='Use channels_last memory layout') parser.add_argument('--amp', action='store_true', default=False, help='Use native Torch AMP mixed precision.') def main(): args = parser.parse_args() if not args.checkpoint and (not args.pretrained): args.pretrained = True amp_autocast = suppress if args.amp: if not has_native_amp: print('Native Torch AMP is not available (requires torch >= 1.6), using FP32.') else: amp_autocast = torch.cuda.amp.autocast model = geffnet.create_model(args.model, num_classes=args.num_classes, in_chans=3, pretrained=args.pretrained, checkpoint_path=args.checkpoint, scriptable=args.torchscript) if args.channels_last: model = model.to(memory_format=torch.channels_last) if args.torchscript: torch.jit.optimized_execution(True) model = torch.jit.script(model) print('Model %s created, param count: %d' % (args.model, sum([m.numel() for m in model.parameters()]))) data_config = resolve_data_config(model, args) criterion = nn.CrossEntropyLoss() if not args.no_cuda: if args.num_gpu > 1: model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda() else: model = model.cuda() criterion = criterion.cuda() loader = create_loader(Dataset(args.data, load_bytes=args.tf_preprocessing), input_size=data_config['input_size'], batch_size=args.batch_size, use_prefetcher=not args.no_cuda, interpolation=data_config['interpolation'], mean=data_config['mean'], std=data_config['std'], num_workers=args.workers, crop_pct=data_config['crop_pct'], tensorflow_preprocessing=args.tf_preprocessing) batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() model.eval() end = time.time() with torch.no_grad(): for (i, (input, target)) in enumerate(loader): if not args.no_cuda: target = target.cuda() input = input.cuda() if args.channels_last: input = input.contiguous(memory_format=torch.channels_last) with amp_autocast(): output = model(input) loss = criterion(output, target) (prec1, prec5) = accuracy(output.data, target, topk=(1, 5)) losses.update(loss.item(), input.size(0)) top1.update(prec1.item(), input.size(0)) top5.update(prec5.item(), input.size(0)) batch_time.update(time.time() - end) end = time.time() if i % args.print_freq == 0: print('Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f}, {rate_avg:.3f}/s) \tLoss {loss.val:.4f} ({loss.avg:.4f})\tPrec@1 {top1.val:.3f} ({top1.avg:.3f})\tPrec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(i, len(loader), batch_time=batch_time, rate_avg=input.size(0) / batch_time.avg, loss=losses, top1=top1, top5=top5)) print(' * Prec@1 {top1.avg:.3f} ({top1a:.3f}) Prec@5 {top5.avg:.3f} ({top5a:.3f})'.format(top1=top1, top1a=100 - top1.avg, top5=top5, top5a=100.0 - top5.avg)) if __name__ == '__main__': main() # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/encoder.py import os import torch import torch.nn as nn import torch.nn.functional as F class Encoder(nn.Module): def __init__(self): super(Encoder, self).__init__() basemodel_name = 'tf_efficientnet_b5_ap' print('Loading base model ()...'.format(basemodel_name), end='') repo_path = os.path.join(os.path.dirname(__file__), 'efficientnet_repo') basemodel = torch.hub.load(repo_path, basemodel_name, pretrained=False, source='local') print('Done.') print('Removing last two layers (global_pool & classifier).') basemodel.global_pool = nn.Identity() basemodel.classifier = nn.Identity() self.original_model = basemodel def forward(self, x): features = [x] for (k, v) in self.original_model._modules.items(): if k == 'blocks': for (ki, vi) in v._modules.items(): features.append(vi(features[-1])) else: features.append(v(features[-1])) return features # File: controlnet_aux-master/src/controlnet_aux/normalbae/nets/submodules/submodules.py import torch import torch.nn as nn import torch.nn.functional as F class UpSampleBN(nn.Module): def __init__(self, skip_input, output_features): super(UpSampleBN, self).__init__() self._net = nn.Sequential(nn.Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(output_features), nn.LeakyReLU(), nn.Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(output_features), nn.LeakyReLU()) def forward(self, x, concat_with): up_x = F.interpolate(x, size=[concat_with.size(2), concat_with.size(3)], mode='bilinear', align_corners=True) f = torch.cat([up_x, concat_with], dim=1) return self._net(f) class UpSampleGN(nn.Module): def __init__(self, skip_input, output_features): super(UpSampleGN, self).__init__() self._net = nn.Sequential(Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1), nn.GroupNorm(8, output_features), nn.LeakyReLU(), Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1), nn.GroupNorm(8, output_features), nn.LeakyReLU()) def forward(self, x, concat_with): up_x = F.interpolate(x, size=[concat_with.size(2), concat_with.size(3)], mode='bilinear', align_corners=True) f = torch.cat([up_x, concat_with], dim=1) return self._net(f) class Conv2d(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True): super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) def forward(self, x): weight = self.weight weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3, keepdim=True) weight = weight - weight_mean std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-05 weight = weight / std.expand_as(weight) return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) def norm_normalize(norm_out): min_kappa = 0.01 (norm_x, norm_y, norm_z, kappa) = torch.split(norm_out, 1, dim=1) norm = torch.sqrt(norm_x ** 2.0 + norm_y ** 2.0 + norm_z ** 2.0) + 1e-10 kappa = F.elu(kappa) + 1.0 + min_kappa final_out = torch.cat([norm_x / norm, norm_y / norm, norm_z / norm, kappa], dim=1) return final_out @torch.no_grad() def sample_points(init_normal, gt_norm_mask, sampling_ratio, beta): device = init_normal.device (B, _, H, W) = init_normal.shape N = int(sampling_ratio * H * W) beta = beta uncertainty_map = -1 * init_normal[:, 3, :, :] if gt_norm_mask is not None: gt_invalid_mask = F.interpolate(gt_norm_mask.float(), size=[H, W], mode='nearest') gt_invalid_mask = gt_invalid_mask[:, 0, :, :] < 0.5 uncertainty_map[gt_invalid_mask] = -10000.0 (_, idx) = uncertainty_map.view(B, -1).sort(1, descending=True) if int(beta * N) > 0: importance = idx[:, :int(beta * N)] remaining = idx[:, int(beta * N):] num_coverage = N - int(beta * N) if num_coverage <= 0: samples = importance else: coverage_list = [] for i in range(B): idx_c = torch.randperm(remaining.size()[1]) coverage_list.append(remaining[i, :][idx_c[:num_coverage]].view(1, -1)) coverage = torch.cat(coverage_list, dim=0) samples = torch.cat((importance, coverage), dim=1) else: remaining = idx[:, :] num_coverage = N coverage_list = [] for i in range(B): idx_c = torch.randperm(remaining.size()[1]) coverage_list.append(remaining[i, :][idx_c[:num_coverage]].view(1, -1)) coverage = torch.cat(coverage_list, dim=0) samples = coverage rows_int = samples // W rows_float = rows_int / float(H - 1) rows_float = rows_float * 2.0 - 1.0 cols_int = samples % W cols_float = cols_int / float(W - 1) cols_float = cols_float * 2.0 - 1.0 point_coords = torch.zeros(B, 1, N, 2) point_coords[:, 0, :, 0] = cols_float point_coords[:, 0, :, 1] = rows_float point_coords = point_coords.to(device) return (point_coords, rows_int, cols_int) # File: controlnet_aux-master/src/controlnet_aux/open_pose/__init__.py import os os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE' import json import warnings from typing import Callable, List, NamedTuple, Tuple, Union import cv2 import numpy as np import torch from huggingface_hub import hf_hub_download from PIL import Image from ..util import HWC3, resize_image from . import util from .body import Body, BodyResult, Keypoint from .face import Face from .hand import Hand HandResult = List[Keypoint] FaceResult = List[Keypoint] class PoseResult(NamedTuple): body: BodyResult left_hand: Union[HandResult, None] right_hand: Union[HandResult, None] face: Union[FaceResult, None] def draw_poses(poses: List[PoseResult], H, W, draw_body=True, draw_hand=True, draw_face=True): canvas = np.zeros(shape=(H, W, 3), dtype=np.uint8) for pose in poses: if draw_body: canvas = util.draw_bodypose(canvas, pose.body.keypoints) if draw_hand: canvas = util.draw_handpose(canvas, pose.left_hand) canvas = util.draw_handpose(canvas, pose.right_hand) if draw_face: canvas = util.draw_facepose(canvas, pose.face) return canvas class OpenposeDetector: def __init__(self, body_estimation, hand_estimation=None, face_estimation=None): self.body_estimation = body_estimation self.hand_estimation = hand_estimation self.face_estimation = face_estimation @classmethod def from_pretrained(cls, pretrained_model_or_path, filename=None, hand_filename=None, face_filename=None, cache_dir=None, local_files_only=False): if pretrained_model_or_path == 'lllyasviel/ControlNet': filename = filename or 'annotator/ckpts/body_pose_model.pth' hand_filename = hand_filename or 'annotator/ckpts/hand_pose_model.pth' face_filename = face_filename or 'facenet.pth' face_pretrained_model_or_path = 'lllyasviel/Annotators' else: filename = filename or 'body_pose_model.pth' hand_filename = hand_filename or 'hand_pose_model.pth' face_filename = face_filename or 'facenet.pth' face_pretrained_model_or_path = pretrained_model_or_path if os.path.isdir(pretrained_model_or_path): body_model_path = os.path.join(pretrained_model_or_path, filename) hand_model_path = os.path.join(pretrained_model_or_path, hand_filename) face_model_path = os.path.join(face_pretrained_model_or_path, face_filename) else: body_model_path = hf_hub_download(pretrained_model_or_path, filename, cache_dir=cache_dir, local_files_only=local_files_only) hand_model_path = hf_hub_download(pretrained_model_or_path, hand_filename, cache_dir=cache_dir, local_files_only=local_files_only) face_model_path = hf_hub_download(face_pretrained_model_or_path, face_filename, cache_dir=cache_dir, local_files_only=local_files_only) body_estimation = Body(body_model_path) hand_estimation = Hand(hand_model_path) face_estimation = Face(face_model_path) return cls(body_estimation, hand_estimation, face_estimation) def to(self, device): self.body_estimation.to(device) self.hand_estimation.to(device) self.face_estimation.to(device) return self def detect_hands(self, body: BodyResult, oriImg) -> Tuple[Union[HandResult, None], Union[HandResult, None]]: left_hand = None right_hand = None (H, W, _) = oriImg.shape for (x, y, w, is_left) in util.handDetect(body, oriImg): peaks = self.hand_estimation(oriImg[y:y + w, x:x + w, :]).astype(np.float32) if peaks.ndim == 2 and peaks.shape[1] == 2: peaks[:, 0] = np.where(peaks[:, 0] < 1e-06, -1, peaks[:, 0] + x) / float(W) peaks[:, 1] = np.where(peaks[:, 1] < 1e-06, -1, peaks[:, 1] + y) / float(H) hand_result = [Keypoint(x=peak[0], y=peak[1]) for peak in peaks] if is_left: left_hand = hand_result else: right_hand = hand_result return (left_hand, right_hand) def detect_face(self, body: BodyResult, oriImg) -> Union[FaceResult, None]: face = util.faceDetect(body, oriImg) if face is None: return None (x, y, w) = face (H, W, _) = oriImg.shape heatmaps = self.face_estimation(oriImg[y:y + w, x:x + w, :]) peaks = self.face_estimation.compute_peaks_from_heatmaps(heatmaps).astype(np.float32) if peaks.ndim == 2 and peaks.shape[1] == 2: peaks[:, 0] = np.where(peaks[:, 0] < 1e-06, -1, peaks[:, 0] + x) / float(W) peaks[:, 1] = np.where(peaks[:, 1] < 1e-06, -1, peaks[:, 1] + y) / float(H) return [Keypoint(x=peak[0], y=peak[1]) for peak in peaks] return None def detect_poses(self, oriImg, include_hand=False, include_face=False) -> List[PoseResult]: oriImg = oriImg[:, :, ::-1].copy() (H, W, C) = oriImg.shape with torch.no_grad(): (candidate, subset) = self.body_estimation(oriImg) bodies = self.body_estimation.format_body_result(candidate, subset) results = [] for body in bodies: (left_hand, right_hand, face) = (None,) * 3 if include_hand: (left_hand, right_hand) = self.detect_hands(body, oriImg) if include_face: face = self.detect_face(body, oriImg) results.append(PoseResult(BodyResult(keypoints=[Keypoint(x=keypoint.x / float(W), y=keypoint.y / float(H)) if keypoint is not None else None for keypoint in body.keypoints], total_score=body.total_score, total_parts=body.total_parts), left_hand, right_hand, face)) return results def __call__(self, input_image, detect_resolution=512, image_resolution=512, include_body=True, include_hand=False, include_face=False, hand_and_face=None, output_type='pil', **kwargs): if hand_and_face is not None: warnings.warn('hand_and_face is deprecated. Use include_hand and include_face instead.', DeprecationWarning) include_hand = hand_and_face include_face = hand_and_face if 'return_pil' in kwargs: warnings.warn('return_pil is deprecated. Use output_type instead.', DeprecationWarning) output_type = 'pil' if kwargs['return_pil'] else 'np' if type(output_type) is bool: warnings.warn('Passing `True` or `False` to `output_type` is deprecated and will raise an error in future versions') if output_type: output_type = 'pil' if not isinstance(input_image, np.ndarray): input_image = np.array(input_image, dtype=np.uint8) input_image = HWC3(input_image) input_image = resize_image(input_image, detect_resolution) (H, W, C) = input_image.shape poses = self.detect_poses(input_image, include_hand, include_face) canvas = draw_poses(poses, H, W, draw_body=include_body, draw_hand=include_hand, draw_face=include_face) detected_map = canvas detected_map = HWC3(detected_map) img = resize_image(input_image, image_resolution) (H, W, C) = img.shape detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR) if output_type == 'pil': detected_map = Image.fromarray(detected_map) return detected_map # File: controlnet_aux-master/src/controlnet_aux/open_pose/body.py import math from typing import List, NamedTuple, Union import cv2 import numpy as np import torch from scipy.ndimage.filters import gaussian_filter from . import util from .model import bodypose_model class Keypoint(NamedTuple): x: float y: float score: float = 1.0 id: int = -1 class BodyResult(NamedTuple): keypoints: List[Union[Keypoint, None]] total_score: float total_parts: int class Body(object): def __init__(self, model_path): self.model = bodypose_model() model_dict = util.transfer(self.model, torch.load(model_path)) self.model.load_state_dict(model_dict) self.model.eval() def to(self, device): self.model.to(device) return self def __call__(self, oriImg): device = next(iter(self.model.parameters())).device scale_search = [0.5] boxsize = 368 stride = 8 padValue = 128 thre1 = 0.1 thre2 = 0.05 multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search] heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19)) paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38)) for m in range(len(multiplier)): scale = multiplier[m] imageToTest = util.smart_resize_k(oriImg, fx=scale, fy=scale) (imageToTest_padded, pad) = util.padRightDownCorner(imageToTest, stride, padValue) im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5 im = np.ascontiguousarray(im) data = torch.from_numpy(im).float() data = data.to(device) with torch.no_grad(): (Mconv7_stage6_L1, Mconv7_stage6_L2) = self.model(data) Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy() Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy() heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0)) heatmap = util.smart_resize_k(heatmap, fx=stride, fy=stride) heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :] heatmap = util.smart_resize(heatmap, (oriImg.shape[0], oriImg.shape[1])) paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0)) paf = util.smart_resize_k(paf, fx=stride, fy=stride) paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :] paf = util.smart_resize(paf, (oriImg.shape[0], oriImg.shape[1])) heatmap_avg += heatmap_avg + heatmap / len(multiplier) paf_avg += +paf / len(multiplier) all_peaks = [] peak_counter = 0 for part in range(18): map_ori = heatmap_avg[:, :, part] one_heatmap = gaussian_filter(map_ori, sigma=3) map_left = np.zeros(one_heatmap.shape) map_left[1:, :] = one_heatmap[:-1, :] map_right = np.zeros(one_heatmap.shape) map_right[:-1, :] = one_heatmap[1:, :] map_up = np.zeros(one_heatmap.shape) map_up[:, 1:] = one_heatmap[:, :-1] map_down = np.zeros(one_heatmap.shape) map_down[:, :-1] = one_heatmap[:, 1:] peaks_binary = np.logical_and.reduce((one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1)) peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks] peak_id = range(peak_counter, peak_counter + len(peaks)) peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))] all_peaks.append(peaks_with_score_and_id) peak_counter += len(peaks) limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], [1, 16], [16, 18], [3, 17], [6, 18]] mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22], [23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52], [55, 56], [37, 38], [45, 46]] connection_all = [] special_k = [] mid_num = 10 for k in range(len(mapIdx)): score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]] candA = all_peaks[limbSeq[k][0] - 1] candB = all_peaks[limbSeq[k][1] - 1] nA = len(candA) nB = len(candB) (indexA, indexB) = limbSeq[k] if nA != 0 and nB != 0: connection_candidate = [] for i in range(nA): for j in range(nB): vec = np.subtract(candB[j][:2], candA[i][:2]) norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1]) norm = max(0.001, norm) vec = np.divide(vec, norm) startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), np.linspace(candA[i][1], candB[j][1], num=mid_num))) vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] for I in range(len(startend))]) vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] for I in range(len(startend))]) score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1]) score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(0.5 * oriImg.shape[0] / norm - 1, 0) criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts) criterion2 = score_with_dist_prior > 0 if criterion1 and criterion2: connection_candidate.append([i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]]) connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True) connection = np.zeros((0, 5)) for c in range(len(connection_candidate)): (i, j, s) = connection_candidate[c][0:3] if i not in connection[:, 3] and j not in connection[:, 4]: connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]]) if len(connection) >= min(nA, nB): break connection_all.append(connection) else: special_k.append(k) connection_all.append([]) subset = -1 * np.ones((0, 20)) candidate = np.array([item for sublist in all_peaks for item in sublist]) for k in range(len(mapIdx)): if k not in special_k: partAs = connection_all[k][:, 0] partBs = connection_all[k][:, 1] (indexA, indexB) = np.array(limbSeq[k]) - 1 for i in range(len(connection_all[k])): found = 0 subset_idx = [-1, -1] for j in range(len(subset)): if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]: subset_idx[found] = j found += 1 if found == 1: j = subset_idx[0] if subset[j][indexB] != partBs[i]: subset[j][indexB] = partBs[i] subset[j][-1] += 1 subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2] elif found == 2: (j1, j2) = subset_idx membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2] if len(np.nonzero(membership == 2)[0]) == 0: subset[j1][:-2] += subset[j2][:-2] + 1 subset[j1][-2:] += subset[j2][-2:] subset[j1][-2] += connection_all[k][i][2] subset = np.delete(subset, j2, 0) else: subset[j1][indexB] = partBs[i] subset[j1][-1] += 1 subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2] elif not found and k < 17: row = -1 * np.ones(20) row[indexA] = partAs[i] row[indexB] = partBs[i] row[-1] = 2 row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2] subset = np.vstack([subset, row]) deleteIdx = [] for i in range(len(subset)): if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4: deleteIdx.append(i) subset = np.delete(subset, deleteIdx, axis=0) return (candidate, subset) @staticmethod def format_body_result(candidate: np.ndarray, subset: np.ndarray) -> List[BodyResult]: return [BodyResult(keypoints=[Keypoint(x=candidate[candidate_index][0], y=candidate[candidate_index][1], score=candidate[candidate_index][2], id=candidate[candidate_index][3]) if candidate_index != -1 else None for candidate_index in person[:18].astype(int)], total_score=person[18], total_parts=person[19]) for person in subset] # File: controlnet_aux-master/src/controlnet_aux/open_pose/face.py import logging import numpy as np import torch import torch.nn.functional as F from torch.nn import Conv2d, MaxPool2d, Module, ReLU, init from torchvision.transforms import ToPILImage, ToTensor from . import util class FaceNet(Module): def __init__(self): super(FaceNet, self).__init__() self.relu = ReLU() self.max_pooling_2d = MaxPool2d(kernel_size=2, stride=2) self.conv1_1 = Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, padding=1) self.conv1_2 = Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1) self.conv2_1 = Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1) self.conv2_2 = Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1) self.conv3_1 = Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1) self.conv3_2 = Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1) self.conv3_3 = Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1) self.conv3_4 = Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1) self.conv4_1 = Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1) self.conv4_2 = Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1) self.conv4_3 = Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1) self.conv4_4 = Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1) self.conv5_1 = Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1) self.conv5_2 = Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1) self.conv5_3_CPM = Conv2d(in_channels=512, out_channels=128, kernel_size=3, stride=1, padding=1) self.conv6_1_CPM = Conv2d(in_channels=128, out_channels=512, kernel_size=1, stride=1, padding=0) self.conv6_2_CPM = Conv2d(in_channels=512, out_channels=71, kernel_size=1, stride=1, padding=0) self.Mconv1_stage2 = Conv2d(in_channels=199, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv2_stage2 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv3_stage2 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv4_stage2 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv5_stage2 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv6_stage2 = Conv2d(in_channels=128, out_channels=128, kernel_size=1, stride=1, padding=0) self.Mconv7_stage2 = Conv2d(in_channels=128, out_channels=71, kernel_size=1, stride=1, padding=0) self.Mconv1_stage3 = Conv2d(in_channels=199, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv2_stage3 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv3_stage3 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv4_stage3 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv5_stage3 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv6_stage3 = Conv2d(in_channels=128, out_channels=128, kernel_size=1, stride=1, padding=0) self.Mconv7_stage3 = Conv2d(in_channels=128, out_channels=71, kernel_size=1, stride=1, padding=0) self.Mconv1_stage4 = Conv2d(in_channels=199, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv2_stage4 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv3_stage4 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv4_stage4 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv5_stage4 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv6_stage4 = Conv2d(in_channels=128, out_channels=128, kernel_size=1, stride=1, padding=0) self.Mconv7_stage4 = Conv2d(in_channels=128, out_channels=71, kernel_size=1, stride=1, padding=0) self.Mconv1_stage5 = Conv2d(in_channels=199, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv2_stage5 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv3_stage5 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv4_stage5 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv5_stage5 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv6_stage5 = Conv2d(in_channels=128, out_channels=128, kernel_size=1, stride=1, padding=0) self.Mconv7_stage5 = Conv2d(in_channels=128, out_channels=71, kernel_size=1, stride=1, padding=0) self.Mconv1_stage6 = Conv2d(in_channels=199, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv2_stage6 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv3_stage6 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv4_stage6 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv5_stage6 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, padding=3) self.Mconv6_stage6 = Conv2d(in_channels=128, out_channels=128, kernel_size=1, stride=1, padding=0) self.Mconv7_stage6 = Conv2d(in_channels=128, out_channels=71, kernel_size=1, stride=1, padding=0) for m in self.modules(): if isinstance(m, Conv2d): init.constant_(m.bias, 0) def forward(self, x): heatmaps = [] h = self.relu(self.conv1_1(x)) h = self.relu(self.conv1_2(h)) h = self.max_pooling_2d(h) h = self.relu(self.conv2_1(h)) h = self.relu(self.conv2_2(h)) h = self.max_pooling_2d(h) h = self.relu(self.conv3_1(h)) h = self.relu(self.conv3_2(h)) h = self.relu(self.conv3_3(h)) h = self.relu(self.conv3_4(h)) h = self.max_pooling_2d(h) h = self.relu(self.conv4_1(h)) h = self.relu(self.conv4_2(h)) h = self.relu(self.conv4_3(h)) h = self.relu(self.conv4_4(h)) h = self.relu(self.conv5_1(h)) h = self.relu(self.conv5_2(h)) h = self.relu(self.conv5_3_CPM(h)) feature_map = h h = self.relu(self.conv6_1_CPM(h)) h = self.conv6_2_CPM(h) heatmaps.append(h) h = torch.cat([h, feature_map], dim=1) h = self.relu(self.Mconv1_stage2(h)) h = self.relu(self.Mconv2_stage2(h)) h = self.relu(self.Mconv3_stage2(h)) h = self.relu(self.Mconv4_stage2(h)) h = self.relu(self.Mconv5_stage2(h)) h = self.relu(self.Mconv6_stage2(h)) h = self.Mconv7_stage2(h) heatmaps.append(h) h = torch.cat([h, feature_map], dim=1) h = self.relu(self.Mconv1_stage3(h)) h = self.relu(self.Mconv2_stage3(h)) h = self.relu(self.Mconv3_stage3(h)) h = self.relu(self.Mconv4_stage3(h)) h = self.relu(self.Mconv5_stage3(h)) h = self.relu(self.Mconv6_stage3(h)) h = self.Mconv7_stage3(h) heatmaps.append(h) h = torch.cat([h, feature_map], dim=1) h = self.relu(self.Mconv1_stage4(h)) h = self.relu(self.Mconv2_stage4(h)) h = self.relu(self.Mconv3_stage4(h)) h = self.relu(self.Mconv4_stage4(h)) h = self.relu(self.Mconv5_stage4(h)) h = self.relu(self.Mconv6_stage4(h)) h = self.Mconv7_stage4(h) heatmaps.append(h) h = torch.cat([h, feature_map], dim=1) h = self.relu(self.Mconv1_stage5(h)) h = self.relu(self.Mconv2_stage5(h)) h = self.relu(self.Mconv3_stage5(h)) h = self.relu(self.Mconv4_stage5(h)) h = self.relu(self.Mconv5_stage5(h)) h = self.relu(self.Mconv6_stage5(h)) h = self.Mconv7_stage5(h) heatmaps.append(h) h = torch.cat([h, feature_map], dim=1) h = self.relu(self.Mconv1_stage6(h)) h = self.relu(self.Mconv2_stage6(h)) h = self.relu(self.Mconv3_stage6(h)) h = self.relu(self.Mconv4_stage6(h)) h = self.relu(self.Mconv5_stage6(h)) h = self.relu(self.Mconv6_stage6(h)) h = self.Mconv7_stage6(h) heatmaps.append(h) return heatmaps LOG = logging.getLogger(__name__) TOTEN = ToTensor() TOPIL = ToPILImage() params = {'gaussian_sigma': 2.5, 'inference_img_size': 736, 'heatmap_peak_thresh': 0.1, 'crop_scale': 1.5, 'line_indices': [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8], [8, 9], [9, 10], [10, 11], [11, 12], [12, 13], [13, 14], [14, 15], [15, 16], [17, 18], [18, 19], [19, 20], [20, 21], [22, 23], [23, 24], [24, 25], [25, 26], [27, 28], [28, 29], [29, 30], [31, 32], [32, 33], [33, 34], [34, 35], [36, 37], [37, 38], [38, 39], [39, 40], [40, 41], [41, 36], [42, 43], [43, 44], [44, 45], [45, 46], [46, 47], [47, 42], [48, 49], [49, 50], [50, 51], [51, 52], [52, 53], [53, 54], [54, 55], [55, 56], [56, 57], [57, 58], [58, 59], [59, 48], [60, 61], [61, 62], [62, 63], [63, 64], [64, 65], [65, 66], [66, 67], [67, 60]]} class Face(object): def __init__(self, face_model_path, inference_size=None, gaussian_sigma=None, heatmap_peak_thresh=None): self.inference_size = inference_size or params['inference_img_size'] self.sigma = gaussian_sigma or params['gaussian_sigma'] self.threshold = heatmap_peak_thresh or params['heatmap_peak_thresh'] self.model = FaceNet() self.model.load_state_dict(torch.load(face_model_path)) self.model.eval() def to(self, device): self.model.to(device) return self def __call__(self, face_img): device = next(iter(self.model.parameters())).device (H, W, C) = face_img.shape w_size = 384 x_data = torch.from_numpy(util.smart_resize(face_img, (w_size, w_size))).permute([2, 0, 1]) / 256.0 - 0.5 x_data = x_data.to(device) with torch.no_grad(): hs = self.model(x_data[None, ...]) heatmaps = F.interpolate(hs[-1], (H, W), mode='bilinear', align_corners=True).cpu().numpy()[0] return heatmaps def compute_peaks_from_heatmaps(self, heatmaps): all_peaks = [] for part in range(heatmaps.shape[0]): map_ori = heatmaps[part].copy() binary = np.ascontiguousarray(map_ori > 0.05, dtype=np.uint8) if np.sum(binary) == 0: continue positions = np.where(binary > 0.5) intensities = map_ori[positions] mi = np.argmax(intensities) (y, x) = (positions[0][mi], positions[1][mi]) all_peaks.append([x, y]) return np.array(all_peaks) # File: controlnet_aux-master/src/controlnet_aux/open_pose/hand.py import cv2 import numpy as np import torch from scipy.ndimage.filters import gaussian_filter from skimage.measure import label from . import util from .model import handpose_model class Hand(object): def __init__(self, model_path): self.model = handpose_model() model_dict = util.transfer(self.model, torch.load(model_path)) self.model.load_state_dict(model_dict) self.model.eval() def to(self, device): self.model.to(device) return self def __call__(self, oriImgRaw): device = next(iter(self.model.parameters())).device scale_search = [0.5, 1.0, 1.5, 2.0] boxsize = 368 stride = 8 padValue = 128 thre = 0.05 multiplier = [x * boxsize for x in scale_search] wsize = 128 heatmap_avg = np.zeros((wsize, wsize, 22)) (Hr, Wr, Cr) = oriImgRaw.shape oriImg = cv2.GaussianBlur(oriImgRaw, (0, 0), 0.8) for m in range(len(multiplier)): scale = multiplier[m] imageToTest = util.smart_resize(oriImg, (scale, scale)) (imageToTest_padded, pad) = util.padRightDownCorner(imageToTest, stride, padValue) im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5 im = np.ascontiguousarray(im) data = torch.from_numpy(im).float() data = data.to(device) with torch.no_grad(): output = self.model(data).cpu().numpy() heatmap = np.transpose(np.squeeze(output), (1, 2, 0)) heatmap = util.smart_resize_k(heatmap, fx=stride, fy=stride) heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :] heatmap = util.smart_resize(heatmap, (wsize, wsize)) heatmap_avg += heatmap / len(multiplier) all_peaks = [] for part in range(21): map_ori = heatmap_avg[:, :, part] one_heatmap = gaussian_filter(map_ori, sigma=3) binary = np.ascontiguousarray(one_heatmap > thre, dtype=np.uint8) if np.sum(binary) == 0: all_peaks.append([0, 0]) continue (label_img, label_numbers) = label(binary, return_num=True, connectivity=binary.ndim) max_index = np.argmax([np.sum(map_ori[label_img == i]) for i in range(1, label_numbers + 1)]) + 1 label_img[label_img != max_index] = 0 map_ori[label_img == 0] = 0 (y, x) = util.npmax(map_ori) y = int(float(y) * float(Hr) / float(wsize)) x = int(float(x) * float(Wr) / float(wsize)) all_peaks.append([x, y]) return np.array(all_peaks) if __name__ == '__main__': hand_estimation = Hand('../model/hand_pose_model.pth') test_image = '../images/hand.jpg' oriImg = cv2.imread(test_image) peaks = hand_estimation(oriImg) canvas = util.draw_handpose(oriImg, peaks, True) cv2.imshow('', canvas) cv2.waitKey(0) # File: controlnet_aux-master/src/controlnet_aux/open_pose/model.py import torch from collections import OrderedDict import torch import torch.nn as nn def make_layers(block, no_relu_layers): layers = [] for (layer_name, v) in block.items(): if 'pool' in layer_name: layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2]) layers.append((layer_name, layer)) else: conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4]) layers.append((layer_name, conv2d)) if layer_name not in no_relu_layers: layers.append(('relu_' + layer_name, nn.ReLU(inplace=True))) return nn.Sequential(OrderedDict(layers)) class bodypose_model(nn.Module): def __init__(self): super(bodypose_model, self).__init__() no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1', 'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2', 'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1', 'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L1'] blocks = {} block0 = OrderedDict([('conv1_1', [3, 64, 3, 1, 1]), ('conv1_2', [64, 64, 3, 1, 1]), ('pool1_stage1', [2, 2, 0]), ('conv2_1', [64, 128, 3, 1, 1]), ('conv2_2', [128, 128, 3, 1, 1]), ('pool2_stage1', [2, 2, 0]), ('conv3_1', [128, 256, 3, 1, 1]), ('conv3_2', [256, 256, 3, 1, 1]), ('conv3_3', [256, 256, 3, 1, 1]), ('conv3_4', [256, 256, 3, 1, 1]), ('pool3_stage1', [2, 2, 0]), ('conv4_1', [256, 512, 3, 1, 1]), ('conv4_2', [512, 512, 3, 1, 1]), ('conv4_3_CPM', [512, 256, 3, 1, 1]), ('conv4_4_CPM', [256, 128, 3, 1, 1])]) block1_1 = OrderedDict([('conv5_1_CPM_L1', [128, 128, 3, 1, 1]), ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]), ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]), ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]), ('conv5_5_CPM_L1', [512, 38, 1, 1, 0])]) block1_2 = OrderedDict([('conv5_1_CPM_L2', [128, 128, 3, 1, 1]), ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]), ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]), ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]), ('conv5_5_CPM_L2', [512, 19, 1, 1, 0])]) blocks['block1_1'] = block1_1 blocks['block1_2'] = block1_2 self.model0 = make_layers(block0, no_relu_layers) for i in range(2, 7): blocks['block%d_1' % i] = OrderedDict([('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]), ('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]), ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]), ('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]), ('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]), ('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]), ('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0])]) blocks['block%d_2' % i] = OrderedDict([('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]), ('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]), ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]), ('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]), ('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]), ('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]), ('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0])]) for k in blocks.keys(): blocks[k] = make_layers(blocks[k], no_relu_layers) self.model1_1 = blocks['block1_1'] self.model2_1 = blocks['block2_1'] self.model3_1 = blocks['block3_1'] self.model4_1 = blocks['block4_1'] self.model5_1 = blocks['block5_1'] self.model6_1 = blocks['block6_1'] self.model1_2 = blocks['block1_2'] self.model2_2 = blocks['block2_2'] self.model3_2 = blocks['block3_2'] self.model4_2 = blocks['block4_2'] self.model5_2 = blocks['block5_2'] self.model6_2 = blocks['block6_2'] def forward(self, x): out1 = self.model0(x) out1_1 = self.model1_1(out1) out1_2 = self.model1_2(out1) out2 = torch.cat([out1_1, out1_2, out1], 1) out2_1 = self.model2_1(out2) out2_2 = self.model2_2(out2) out3 = torch.cat([out2_1, out2_2, out1], 1) out3_1 = self.model3_1(out3) out3_2 = self.model3_2(out3) out4 = torch.cat([out3_1, out3_2, out1], 1) out4_1 = self.model4_1(out4) out4_2 = self.model4_2(out4) out5 = torch.cat([out4_1, out4_2, out1], 1) out5_1 = self.model5_1(out5) out5_2 = self.model5_2(out5) out6 = torch.cat([out5_1, out5_2, out1], 1) out6_1 = self.model6_1(out6) out6_2 = self.model6_2(out6) return (out6_1, out6_2) class handpose_model(nn.Module): def __init__(self): super(handpose_model, self).__init__() no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3', 'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6'] block1_0 = OrderedDict([('conv1_1', [3, 64, 3, 1, 1]), ('conv1_2', [64, 64, 3, 1, 1]), ('pool1_stage1', [2, 2, 0]), ('conv2_1', [64, 128, 3, 1, 1]), ('conv2_2', [128, 128, 3, 1, 1]), ('pool2_stage1', [2, 2, 0]), ('conv3_1', [128, 256, 3, 1, 1]), ('conv3_2', [256, 256, 3, 1, 1]), ('conv3_3', [256, 256, 3, 1, 1]), ('conv3_4', [256, 256, 3, 1, 1]), ('pool3_stage1', [2, 2, 0]), ('conv4_1', [256, 512, 3, 1, 1]), ('conv4_2', [512, 512, 3, 1, 1]), ('conv4_3', [512, 512, 3, 1, 1]), ('conv4_4', [512, 512, 3, 1, 1]), ('conv5_1', [512, 512, 3, 1, 1]), ('conv5_2', [512, 512, 3, 1, 1]), ('conv5_3_CPM', [512, 128, 3, 1, 1])]) block1_1 = OrderedDict([('conv6_1_CPM', [128, 512, 1, 1, 0]), ('conv6_2_CPM', [512, 22, 1, 1, 0])]) blocks = {} blocks['block1_0'] = block1_0 blocks['block1_1'] = block1_1 for i in range(2, 7): blocks['block%d' % i] = OrderedDict([('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]), ('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]), ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]), ('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]), ('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]), ('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]), ('Mconv7_stage%d' % i, [128, 22, 1, 1, 0])]) for k in blocks.keys(): blocks[k] = make_layers(blocks[k], no_relu_layers) self.model1_0 = blocks['block1_0'] self.model1_1 = blocks['block1_1'] self.model2 = blocks['block2'] self.model3 = blocks['block3'] self.model4 = blocks['block4'] self.model5 = blocks['block5'] self.model6 = blocks['block6'] def forward(self, x): out1_0 = self.model1_0(x) out1_1 = self.model1_1(out1_0) concat_stage2 = torch.cat([out1_1, out1_0], 1) out_stage2 = self.model2(concat_stage2) concat_stage3 = torch.cat([out_stage2, out1_0], 1) out_stage3 = self.model3(concat_stage3) concat_stage4 = torch.cat([out_stage3, out1_0], 1) out_stage4 = self.model4(concat_stage4) concat_stage5 = torch.cat([out_stage4, out1_0], 1) out_stage5 = self.model5(concat_stage5) concat_stage6 = torch.cat([out_stage5, out1_0], 1) out_stage6 = self.model6(concat_stage6) return out_stage6 # File: controlnet_aux-master/src/controlnet_aux/open_pose/util.py import math import numpy as np import cv2 from typing import List, Tuple, Union from .body import BodyResult, Keypoint eps = 0.01 def smart_resize(x, s): (Ht, Wt) = s if x.ndim == 2: (Ho, Wo) = x.shape Co = 1 else: (Ho, Wo, Co) = x.shape if Co == 3 or Co == 1: k = float(Ht + Wt) / float(Ho + Wo) return cv2.resize(x, (int(Wt), int(Ht)), interpolation=cv2.INTER_AREA if k < 1 else cv2.INTER_LANCZOS4) else: return np.stack([smart_resize(x[:, :, i], s) for i in range(Co)], axis=2) def smart_resize_k(x, fx, fy): if x.ndim == 2: (Ho, Wo) = x.shape Co = 1 else: (Ho, Wo, Co) = x.shape (Ht, Wt) = (Ho * fy, Wo * fx) if Co == 3 or Co == 1: k = float(Ht + Wt) / float(Ho + Wo) return cv2.resize(x, (int(Wt), int(Ht)), interpolation=cv2.INTER_AREA if k < 1 else cv2.INTER_LANCZOS4) else: return np.stack([smart_resize_k(x[:, :, i], fx, fy) for i in range(Co)], axis=2) def padRightDownCorner(img, stride, padValue): h = img.shape[0] w = img.shape[1] pad = 4 * [None] pad[0] = 0 pad[1] = 0 pad[2] = 0 if h % stride == 0 else stride - h % stride pad[3] = 0 if w % stride == 0 else stride - w % stride img_padded = img pad_up = np.tile(img_padded[0:1, :, :] * 0 + padValue, (pad[0], 1, 1)) img_padded = np.concatenate((pad_up, img_padded), axis=0) pad_left = np.tile(img_padded[:, 0:1, :] * 0 + padValue, (1, pad[1], 1)) img_padded = np.concatenate((pad_left, img_padded), axis=1) pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + padValue, (pad[2], 1, 1)) img_padded = np.concatenate((img_padded, pad_down), axis=0) pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + padValue, (1, pad[3], 1)) img_padded = np.concatenate((img_padded, pad_right), axis=1) return (img_padded, pad) def transfer(model, model_weights): transfered_model_weights = {} for weights_name in model.state_dict().keys(): transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])] return transfered_model_weights def draw_bodypose(canvas: np.ndarray, keypoints: List[Keypoint]) -> np.ndarray: (H, W, C) = canvas.shape stickwidth = 4 limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], [1, 16], [16, 18]] colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]] for ((k1_index, k2_index), color) in zip(limbSeq, colors): keypoint1 = keypoints[k1_index - 1] keypoint2 = keypoints[k2_index - 1] if keypoint1 is None or keypoint2 is None: continue Y = np.array([keypoint1.x, keypoint2.x]) * float(W) X = np.array([keypoint1.y, keypoint2.y]) * float(H) mX = np.mean(X) mY = np.mean(Y) length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1) cv2.fillConvexPoly(canvas, polygon, [int(float(c) * 0.6) for c in color]) for (keypoint, color) in zip(keypoints, colors): if keypoint is None: continue (x, y) = (keypoint.x, keypoint.y) x = int(x * W) y = int(y * H) cv2.circle(canvas, (int(x), int(y)), 4, color, thickness=-1) return canvas def draw_handpose(canvas: np.ndarray, keypoints: Union[List[Keypoint], None]) -> np.ndarray: import matplotlib '' if not keypoints: return canvas (H, W, C) = canvas.shape edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]] for (ie, (e1, e2)) in enumerate(edges): k1 = keypoints[e1] k2 = keypoints[e2] if k1 is None or k2 is None: continue x1 = int(k1.x * W) y1 = int(k1.y * H) x2 = int(k2.x * W) y2 = int(k2.y * H) if x1 > eps and y1 > eps and (x2 > eps) and (y2 > eps): cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * 255, thickness=2) for keypoint in keypoints: (x, y) = (keypoint.x, keypoint.y) x = int(x * W) y = int(y * H) if x > eps and y > eps: cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1) return canvas def draw_facepose(canvas: np.ndarray, keypoints: Union[List[Keypoint], None]) -> np.ndarray: if not keypoints: return canvas (H, W, C) = canvas.shape for keypoint in keypoints: (x, y) = (keypoint.x, keypoint.y) x = int(x * W) y = int(y * H) if x > eps and y > eps: cv2.circle(canvas, (x, y), 3, (255, 255, 255), thickness=-1) return canvas def handDetect(body: BodyResult, oriImg) -> List[Tuple[int, int, int, bool]]: ratioWristElbow = 0.33 detect_result = [] (image_height, image_width) = oriImg.shape[0:2] keypoints = body.keypoints left_shoulder = keypoints[5] left_elbow = keypoints[6] left_wrist = keypoints[7] right_shoulder = keypoints[2] right_elbow = keypoints[3] right_wrist = keypoints[4] has_left = all((keypoint is not None for keypoint in (left_shoulder, left_elbow, left_wrist))) has_right = all((keypoint is not None for keypoint in (right_shoulder, right_elbow, right_wrist))) if not (has_left or has_right): return [] hands = [] if has_left: hands.append([left_shoulder.x, left_shoulder.y, left_elbow.x, left_elbow.y, left_wrist.x, left_wrist.y, True]) if has_right: hands.append([right_shoulder.x, right_shoulder.y, right_elbow.x, right_elbow.y, right_wrist.x, right_wrist.y, False]) for (x1, y1, x2, y2, x3, y3, is_left) in hands: x = x3 + ratioWristElbow * (x3 - x2) y = y3 + ratioWristElbow * (y3 - y2) distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2) distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2) width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder) x -= width / 2 y -= width / 2 if x < 0: x = 0 if y < 0: y = 0 width1 = width width2 = width if x + width > image_width: width1 = image_width - x if y + width > image_height: width2 = image_height - y width = min(width1, width2) if width >= 20: detect_result.append((int(x), int(y), int(width), is_left)) '' return detect_result def faceDetect(body: BodyResult, oriImg) -> Union[Tuple[int, int, int], None]: (image_height, image_width) = oriImg.shape[0:2] keypoints = body.keypoints head = keypoints[0] left_eye = keypoints[14] right_eye = keypoints[15] left_ear = keypoints[16] right_ear = keypoints[17] if head is None or all((keypoint is None for keypoint in (left_eye, right_eye, left_ear, right_ear))): return None width = 0.0 (x0, y0) = (head.x, head.y) if left_eye is not None: (x1, y1) = (left_eye.x, left_eye.y) d = max(abs(x0 - x1), abs(y0 - y1)) width = max(width, d * 3.0) if right_eye is not None: (x1, y1) = (right_eye.x, right_eye.y) d = max(abs(x0 - x1), abs(y0 - y1)) width = max(width, d * 3.0) if left_ear is not None: (x1, y1) = (left_ear.x, left_ear.y) d = max(abs(x0 - x1), abs(y0 - y1)) width = max(width, d * 1.5) if right_ear is not None: (x1, y1) = (right_ear.x, right_ear.y) d = max(abs(x0 - x1), abs(y0 - y1)) width = max(width, d * 1.5) (x, y) = (x0, y0) x -= width y -= width if x < 0: x = 0 if y < 0: y = 0 width1 = width * 2 width2 = width * 2 if x + width > image_width: width1 = image_width - x if y + width > image_height: width2 = image_height - y width = min(width1, width2) if width >= 20: return (int(x), int(y), int(width)) else: return None def npmax(array): arrayindex = array.argmax(1) arrayvalue = array.max(1) i = arrayvalue.argmax() j = arrayindex[i] return (i, j) # File: controlnet_aux-master/src/controlnet_aux/pidi/__init__.py import os import warnings import cv2 import numpy as np import torch from einops import rearrange from huggingface_hub import hf_hub_download from PIL import Image from ..util import HWC3, nms, resize_image, safe_step from .model import pidinet class PidiNetDetector: def __init__(self, netNetwork): self.netNetwork = netNetwork @classmethod def from_pretrained(cls, pretrained_model_or_path, filename=None, cache_dir=None, local_files_only=False): filename = filename or 'table5_pidinet.pth' if os.path.isdir(pretrained_model_or_path): model_path = os.path.join(pretrained_model_or_path, filename) else: model_path = hf_hub_download(pretrained_model_or_path, filename, cache_dir=cache_dir, local_files_only=local_files_only) netNetwork = pidinet() netNetwork.load_state_dict({k.replace('module.', ''): v for (k, v) in torch.load(model_path)['state_dict'].items()}) netNetwork.eval() return cls(netNetwork) def to(self, device): self.netNetwork.to(device) return self def __call__(self, input_image, detect_resolution=512, image_resolution=512, safe=False, output_type='pil', scribble=False, apply_filter=False, **kwargs): if 'return_pil' in kwargs: warnings.warn('return_pil is deprecated. Use output_type instead.', DeprecationWarning) output_type = 'pil' if kwargs['return_pil'] else 'np' if type(output_type) is bool: warnings.warn('Passing `True` or `False` to `output_type` is deprecated and will raise an error in future versions') if output_type: output_type = 'pil' device = next(iter(self.netNetwork.parameters())).device if not isinstance(input_image, np.ndarray): input_image = np.array(input_image, dtype=np.uint8) input_image = HWC3(input_image) input_image = resize_image(input_image, detect_resolution) assert input_image.ndim == 3 input_image = input_image[:, :, ::-1].copy() with torch.no_grad(): image_pidi = torch.from_numpy(input_image).float().to(device) image_pidi = image_pidi / 255.0 image_pidi = rearrange(image_pidi, 'h w c -> 1 c h w') edge = self.netNetwork(image_pidi)[-1] edge = edge.cpu().numpy() if apply_filter: edge = edge > 0.5 if safe: edge = safe_step(edge) edge = (edge * 255.0).clip(0, 255).astype(np.uint8) detected_map = edge[0, 0] detected_map = HWC3(detected_map) img = resize_image(input_image, image_resolution) (H, W, C) = img.shape detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR) if scribble: detected_map = nms(detected_map, 127, 3.0) detected_map = cv2.GaussianBlur(detected_map, (0, 0), 3.0) detected_map[detected_map > 4] = 255 detected_map[detected_map < 255] = 0 if output_type == 'pil': detected_map = Image.fromarray(detected_map) return detected_map # File: controlnet_aux-master/src/controlnet_aux/pidi/model.py """""" import math import cv2 import numpy as np import torch import torch.nn as nn import torch.nn.functional as F def img2tensor(imgs, bgr2rgb=True, float32=True): def _totensor(img, bgr2rgb, float32): if img.shape[2] == 3 and bgr2rgb: if img.dtype == 'float64': img = img.astype('float32') img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = torch.from_numpy(img.transpose(2, 0, 1)) if float32: img = img.float() return img if isinstance(imgs, list): return [_totensor(img, bgr2rgb, float32) for img in imgs] else: return _totensor(imgs, bgr2rgb, float32) nets = {'baseline': {'layer0': 'cv', 'layer1': 'cv', 'layer2': 'cv', 'layer3': 'cv', 'layer4': 'cv', 'layer5': 'cv', 'layer6': 'cv', 'layer7': 'cv', 'layer8': 'cv', 'layer9': 'cv', 'layer10': 'cv', 'layer11': 'cv', 'layer12': 'cv', 'layer13': 'cv', 'layer14': 'cv', 'layer15': 'cv'}, 'c-v15': {'layer0': 'cd', 'layer1': 'cv', 'layer2': 'cv', 'layer3': 'cv', 'layer4': 'cv', 'layer5': 'cv', 'layer6': 'cv', 'layer7': 'cv', 'layer8': 'cv', 'layer9': 'cv', 'layer10': 'cv', 'layer11': 'cv', 'layer12': 'cv', 'layer13': 'cv', 'layer14': 'cv', 'layer15': 'cv'}, 'a-v15': {'layer0': 'ad', 'layer1': 'cv', 'layer2': 'cv', 'layer3': 'cv', 'layer4': 'cv', 'layer5': 'cv', 'layer6': 'cv', 'layer7': 'cv', 'layer8': 'cv', 'layer9': 'cv', 'layer10': 'cv', 'layer11': 'cv', 'layer12': 'cv', 'layer13': 'cv', 'layer14': 'cv', 'layer15': 'cv'}, 'r-v15': {'layer0': 'rd', 'layer1': 'cv', 'layer2': 'cv', 'layer3': 'cv', 'layer4': 'cv', 'layer5': 'cv', 'layer6': 'cv', 'layer7': 'cv', 'layer8': 'cv', 'layer9': 'cv', 'layer10': 'cv', 'layer11': 'cv', 'layer12': 'cv', 'layer13': 'cv', 'layer14': 'cv', 'layer15': 'cv'}, 'cvvv4': {'layer0': 'cd', 'layer1': 'cv', 'layer2': 'cv', 'layer3': 'cv', 'layer4': 'cd', 'layer5': 'cv', 'layer6': 'cv', 'layer7': 'cv', 'layer8': 'cd', 'layer9': 'cv', 'layer10': 'cv', 'layer11': 'cv', 'layer12': 'cd', 'layer13': 'cv', 'layer14': 'cv', 'layer15': 'cv'}, 'avvv4': {'layer0': 'ad', 'layer1': 'cv', 'layer2': 'cv', 'layer3': 'cv', 'layer4': 'ad', 'layer5': 'cv', 'layer6': 'cv', 'layer7': 'cv', 'layer8': 'ad', 'layer9': 'cv', 'layer10': 'cv', 'layer11': 'cv', 'layer12': 'ad', 'layer13': 'cv', 'layer14': 'cv', 'layer15': 'cv'}, 'rvvv4': {'layer0': 'rd', 'layer1': 'cv', 'layer2': 'cv', 'layer3': 'cv', 'layer4': 'rd', 'layer5': 'cv', 'layer6': 'cv', 'layer7': 'cv', 'layer8': 'rd', 'layer9': 'cv', 'layer10': 'cv', 'layer11': 'cv', 'layer12': 'rd', 'layer13': 'cv', 'layer14': 'cv', 'layer15': 'cv'}, 'cccv4': {'layer0': 'cd', 'layer1': 'cd', 'layer2': 'cd', 'layer3': 'cv', 'layer4': 'cd', 'layer5': 'cd', 'layer6': 'cd', 'layer7': 'cv', 'layer8': 'cd', 'layer9': 'cd', 'layer10': 'cd', 'layer11': 'cv', 'layer12': 'cd', 'layer13': 'cd', 'layer14': 'cd', 'layer15': 'cv'}, 'aaav4': {'layer0': 'ad', 'layer1': 'ad', 'layer2': 'ad', 'layer3': 'cv', 'layer4': 'ad', 'layer5': 'ad', 'layer6': 'ad', 'layer7': 'cv', 'layer8': 'ad', 'layer9': 'ad', 'layer10': 'ad', 'layer11': 'cv', 'layer12': 'ad', 'layer13': 'ad', 'layer14': 'ad', 'layer15': 'cv'}, 'rrrv4': {'layer0': 'rd', 'layer1': 'rd', 'layer2': 'rd', 'layer3': 'cv', 'layer4': 'rd', 'layer5': 'rd', 'layer6': 'rd', 'layer7': 'cv', 'layer8': 'rd', 'layer9': 'rd', 'layer10': 'rd', 'layer11': 'cv', 'layer12': 'rd', 'layer13': 'rd', 'layer14': 'rd', 'layer15': 'cv'}, 'c16': {'layer0': 'cd', 'layer1': 'cd', 'layer2': 'cd', 'layer3': 'cd', 'layer4': 'cd', 'layer5': 'cd', 'layer6': 'cd', 'layer7': 'cd', 'layer8': 'cd', 'layer9': 'cd', 'layer10': 'cd', 'layer11': 'cd', 'layer12': 'cd', 'layer13': 'cd', 'layer14': 'cd', 'layer15': 'cd'}, 'a16': {'layer0': 'ad', 'layer1': 'ad', 'layer2': 'ad', 'layer3': 'ad', 'layer4': 'ad', 'layer5': 'ad', 'layer6': 'ad', 'layer7': 'ad', 'layer8': 'ad', 'layer9': 'ad', 'layer10': 'ad', 'layer11': 'ad', 'layer12': 'ad', 'layer13': 'ad', 'layer14': 'ad', 'layer15': 'ad'}, 'r16': {'layer0': 'rd', 'layer1': 'rd', 'layer2': 'rd', 'layer3': 'rd', 'layer4': 'rd', 'layer5': 'rd', 'layer6': 'rd', 'layer7': 'rd', 'layer8': 'rd', 'layer9': 'rd', 'layer10': 'rd', 'layer11': 'rd', 'layer12': 'rd', 'layer13': 'rd', 'layer14': 'rd', 'layer15': 'rd'}, 'carv4': {'layer0': 'cd', 'layer1': 'ad', 'layer2': 'rd', 'layer3': 'cv', 'layer4': 'cd', 'layer5': 'ad', 'layer6': 'rd', 'layer7': 'cv', 'layer8': 'cd', 'layer9': 'ad', 'layer10': 'rd', 'layer11': 'cv', 'layer12': 'cd', 'layer13': 'ad', 'layer14': 'rd', 'layer15': 'cv'}} def createConvFunc(op_type): assert op_type in ['cv', 'cd', 'ad', 'rd'], 'unknown op type: %s' % str(op_type) if op_type == 'cv': return F.conv2d if op_type == 'cd': def func(x, weights, bias=None, stride=1, padding=0, dilation=1, groups=1): assert dilation in [1, 2], 'dilation for cd_conv should be in 1 or 2' assert weights.size(2) == 3 and weights.size(3) == 3, 'kernel size for cd_conv should be 3x3' assert padding == dilation, 'padding for cd_conv set wrong' weights_c = weights.sum(dim=[2, 3], keepdim=True) yc = F.conv2d(x, weights_c, stride=stride, padding=0, groups=groups) y = F.conv2d(x, weights, bias, stride=stride, padding=padding, dilation=dilation, groups=groups) return y - yc return func elif op_type == 'ad': def func(x, weights, bias=None, stride=1, padding=0, dilation=1, groups=1): assert dilation in [1, 2], 'dilation for ad_conv should be in 1 or 2' assert weights.size(2) == 3 and weights.size(3) == 3, 'kernel size for ad_conv should be 3x3' assert padding == dilation, 'padding for ad_conv set wrong' shape = weights.shape weights = weights.view(shape[0], shape[1], -1) weights_conv = (weights - weights[:, :, [3, 0, 1, 6, 4, 2, 7, 8, 5]]).view(shape) y = F.conv2d(x, weights_conv, bias, stride=stride, padding=padding, dilation=dilation, groups=groups) return y return func elif op_type == 'rd': def func(x, weights, bias=None, stride=1, padding=0, dilation=1, groups=1): assert dilation in [1, 2], 'dilation for rd_conv should be in 1 or 2' assert weights.size(2) == 3 and weights.size(3) == 3, 'kernel size for rd_conv should be 3x3' padding = 2 * dilation shape = weights.shape if weights.is_cuda: buffer = torch.cuda.FloatTensor(shape[0], shape[1], 5 * 5).fill_(0) else: buffer = torch.zeros(shape[0], shape[1], 5 * 5).to(weights.device) weights = weights.view(shape[0], shape[1], -1) buffer[:, :, [0, 2, 4, 10, 14, 20, 22, 24]] = weights[:, :, 1:] buffer[:, :, [6, 7, 8, 11, 13, 16, 17, 18]] = -weights[:, :, 1:] buffer[:, :, 12] = 0 buffer = buffer.view(shape[0], shape[1], 5, 5) y = F.conv2d(x, buffer, bias, stride=stride, padding=padding, dilation=dilation, groups=groups) return y return func else: print('impossible to be here unless you force that') return None class Conv2d(nn.Module): def __init__(self, pdc, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False): super(Conv2d, self).__init__() if in_channels % groups != 0: raise ValueError('in_channels must be divisible by groups') if out_channels % groups != 0: raise ValueError('out_channels must be divisible by groups') self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding self.dilation = dilation self.groups = groups self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels // groups, kernel_size, kernel_size)) if bias: self.bias = nn.Parameter(torch.Tensor(out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() self.pdc = pdc def reset_parameters(self): nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: (fan_in, _) = nn.init._calculate_fan_in_and_fan_out(self.weight) bound = 1 / math.sqrt(fan_in) nn.init.uniform_(self.bias, -bound, bound) def forward(self, input): return self.pdc(input, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) class CSAM(nn.Module): def __init__(self, channels): super(CSAM, self).__init__() mid_channels = 4 self.relu1 = nn.ReLU() self.conv1 = nn.Conv2d(channels, mid_channels, kernel_size=1, padding=0) self.conv2 = nn.Conv2d(mid_channels, 1, kernel_size=3, padding=1, bias=False) self.sigmoid = nn.Sigmoid() nn.init.constant_(self.conv1.bias, 0) def forward(self, x): y = self.relu1(x) y = self.conv1(y) y = self.conv2(y) y = self.sigmoid(y) return x * y class CDCM(nn.Module): def __init__(self, in_channels, out_channels): super(CDCM, self).__init__() self.relu1 = nn.ReLU() self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0) self.conv2_1 = nn.Conv2d(out_channels, out_channels, kernel_size=3, dilation=5, padding=5, bias=False) self.conv2_2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, dilation=7, padding=7, bias=False) self.conv2_3 = nn.Conv2d(out_channels, out_channels, kernel_size=3, dilation=9, padding=9, bias=False) self.conv2_4 = nn.Conv2d(out_channels, out_channels, kernel_size=3, dilation=11, padding=11, bias=False) nn.init.constant_(self.conv1.bias, 0) def forward(self, x): x = self.relu1(x) x = self.conv1(x) x1 = self.conv2_1(x) x2 = self.conv2_2(x) x3 = self.conv2_3(x) x4 = self.conv2_4(x) return x1 + x2 + x3 + x4 class MapReduce(nn.Module): def __init__(self, channels): super(MapReduce, self).__init__() self.conv = nn.Conv2d(channels, 1, kernel_size=1, padding=0) nn.init.constant_(self.conv.bias, 0) def forward(self, x): return self.conv(x) class PDCBlock(nn.Module): def __init__(self, pdc, inplane, ouplane, stride=1): super(PDCBlock, self).__init__() self.stride = stride self.stride = stride if self.stride > 1: self.pool = nn.MaxPool2d(kernel_size=2, stride=2) self.shortcut = nn.Conv2d(inplane, ouplane, kernel_size=1, padding=0) self.conv1 = Conv2d(pdc, inplane, inplane, kernel_size=3, padding=1, groups=inplane, bias=False) self.relu2 = nn.ReLU() self.conv2 = nn.Conv2d(inplane, ouplane, kernel_size=1, padding=0, bias=False) def forward(self, x): if self.stride > 1: x = self.pool(x) y = self.conv1(x) y = self.relu2(y) y = self.conv2(y) if self.stride > 1: x = self.shortcut(x) y = y + x return y class PDCBlock_converted(nn.Module): def __init__(self, pdc, inplane, ouplane, stride=1): super(PDCBlock_converted, self).__init__() self.stride = stride if self.stride > 1: self.pool = nn.MaxPool2d(kernel_size=2, stride=2) self.shortcut = nn.Conv2d(inplane, ouplane, kernel_size=1, padding=0) if pdc == 'rd': self.conv1 = nn.Conv2d(inplane, inplane, kernel_size=5, padding=2, groups=inplane, bias=False) else: self.conv1 = nn.Conv2d(inplane, inplane, kernel_size=3, padding=1, groups=inplane, bias=False) self.relu2 = nn.ReLU() self.conv2 = nn.Conv2d(inplane, ouplane, kernel_size=1, padding=0, bias=False) def forward(self, x): if self.stride > 1: x = self.pool(x) y = self.conv1(x) y = self.relu2(y) y = self.conv2(y) if self.stride > 1: x = self.shortcut(x) y = y + x return y class PiDiNet(nn.Module): def __init__(self, inplane, pdcs, dil=None, sa=False, convert=False): super(PiDiNet, self).__init__() self.sa = sa if dil is not None: assert isinstance(dil, int), 'dil should be an int' self.dil = dil self.fuseplanes = [] self.inplane = inplane if convert: if pdcs[0] == 'rd': init_kernel_size = 5 init_padding = 2 else: init_kernel_size = 3 init_padding = 1 self.init_block = nn.Conv2d(3, self.inplane, kernel_size=init_kernel_size, padding=init_padding, bias=False) block_class = PDCBlock_converted else: self.init_block = Conv2d(pdcs[0], 3, self.inplane, kernel_size=3, padding=1) block_class = PDCBlock self.block1_1 = block_class(pdcs[1], self.inplane, self.inplane) self.block1_2 = block_class(pdcs[2], self.inplane, self.inplane) self.block1_3 = block_class(pdcs[3], self.inplane, self.inplane) self.fuseplanes.append(self.inplane) inplane = self.inplane self.inplane = self.inplane * 2 self.block2_1 = block_class(pdcs[4], inplane, self.inplane, stride=2) self.block2_2 = block_class(pdcs[5], self.inplane, self.inplane) self.block2_3 = block_class(pdcs[6], self.inplane, self.inplane) self.block2_4 = block_class(pdcs[7], self.inplane, self.inplane) self.fuseplanes.append(self.inplane) inplane = self.inplane self.inplane = self.inplane * 2 self.block3_1 = block_class(pdcs[8], inplane, self.inplane, stride=2) self.block3_2 = block_class(pdcs[9], self.inplane, self.inplane) self.block3_3 = block_class(pdcs[10], self.inplane, self.inplane) self.block3_4 = block_class(pdcs[11], self.inplane, self.inplane) self.fuseplanes.append(self.inplane) self.block4_1 = block_class(pdcs[12], self.inplane, self.inplane, stride=2) self.block4_2 = block_class(pdcs[13], self.inplane, self.inplane) self.block4_3 = block_class(pdcs[14], self.inplane, self.inplane) self.block4_4 = block_class(pdcs[15], self.inplane, self.inplane) self.fuseplanes.append(self.inplane) self.conv_reduces = nn.ModuleList() if self.sa and self.dil is not None: self.attentions = nn.ModuleList() self.dilations = nn.ModuleList() for i in range(4): self.dilations.append(CDCM(self.fuseplanes[i], self.dil)) self.attentions.append(CSAM(self.dil)) self.conv_reduces.append(MapReduce(self.dil)) elif self.sa: self.attentions = nn.ModuleList() for i in range(4): self.attentions.append(CSAM(self.fuseplanes[i])) self.conv_reduces.append(MapReduce(self.fuseplanes[i])) elif self.dil is not None: self.dilations = nn.ModuleList() for i in range(4): self.dilations.append(CDCM(self.fuseplanes[i], self.dil)) self.conv_reduces.append(MapReduce(self.dil)) else: for i in range(4): self.conv_reduces.append(MapReduce(self.fuseplanes[i])) self.classifier = nn.Conv2d(4, 1, kernel_size=1) nn.init.constant_(self.classifier.weight, 0.25) nn.init.constant_(self.classifier.bias, 0) def get_weights(self): conv_weights = [] bn_weights = [] relu_weights = [] for (pname, p) in self.named_parameters(): if 'bn' in pname: bn_weights.append(p) elif 'relu' in pname: relu_weights.append(p) else: conv_weights.append(p) return (conv_weights, bn_weights, relu_weights) def forward(self, x): (H, W) = x.size()[2:] x = self.init_block(x) x1 = self.block1_1(x) x1 = self.block1_2(x1) x1 = self.block1_3(x1) x2 = self.block2_1(x1) x2 = self.block2_2(x2) x2 = self.block2_3(x2) x2 = self.block2_4(x2) x3 = self.block3_1(x2) x3 = self.block3_2(x3) x3 = self.block3_3(x3) x3 = self.block3_4(x3) x4 = self.block4_1(x3) x4 = self.block4_2(x4) x4 = self.block4_3(x4) x4 = self.block4_4(x4) x_fuses = [] if self.sa and self.dil is not None: for (i, xi) in enumerate([x1, x2, x3, x4]): x_fuses.append(self.attentions[i](self.dilations[i](xi))) elif self.sa: for (i, xi) in enumerate([x1, x2, x3, x4]): x_fuses.append(self.attentions[i](xi)) elif self.dil is not None: for (i, xi) in enumerate([x1, x2, x3, x4]): x_fuses.append(self.dilations[i](xi)) else: x_fuses = [x1, x2, x3, x4] e1 = self.conv_reduces[0](x_fuses[0]) e1 = F.interpolate(e1, (H, W), mode='bilinear', align_corners=False) e2 = self.conv_reduces[1](x_fuses[1]) e2 = F.interpolate(e2, (H, W), mode='bilinear', align_corners=False) e3 = self.conv_reduces[2](x_fuses[2]) e3 = F.interpolate(e3, (H, W), mode='bilinear', align_corners=False) e4 = self.conv_reduces[3](x_fuses[3]) e4 = F.interpolate(e4, (H, W), mode='bilinear', align_corners=False) outputs = [e1, e2, e3, e4] output = self.classifier(torch.cat(outputs, dim=1)) outputs.append(output) outputs = [torch.sigmoid(r) for r in outputs] return outputs def config_model(model): model_options = list(nets.keys()) assert model in model_options, 'unrecognized model, please choose from %s' % str(model_options) pdcs = [] for i in range(16): layer_name = 'layer%d' % i op = nets[model][layer_name] pdcs.append(createConvFunc(op)) return pdcs def pidinet(): pdcs = config_model('carv4') dil = 24 return PiDiNet(60, pdcs, dil=dil, sa=True) if __name__ == '__main__': model = pidinet() ckp = torch.load('table5_pidinet.pth')['state_dict'] model.load_state_dict({k.replace('module.', ''): v for (k, v) in ckp.items()}) im = cv2.imread('examples/test_my/cat_v4.png') im = img2tensor(im).unsqueeze(0) / 255.0 res = model(im)[-1] res = res > 0.5 res = res.float() res = (res[0, 0].cpu().data.numpy() * 255.0).astype(np.uint8) print(res.shape) cv2.imwrite('edge.png', res) # File: controlnet_aux-master/src/controlnet_aux/processor.py """""" import io import logging from typing import Dict, Optional, Union from PIL import Image from controlnet_aux import CannyDetector, ContentShuffleDetector, HEDdetector, LeresDetector, LineartAnimeDetector, LineartDetector, MediapipeFaceDetector, MidasDetector, MLSDdetector, NormalBaeDetector, OpenposeDetector, PidiNetDetector, ZoeDetector, DWposeDetector LOGGER = logging.getLogger(__name__) MODELS = {'scribble_hed': {'class': HEDdetector, 'checkpoint': True}, 'softedge_hed': {'class': HEDdetector, 'checkpoint': True}, 'scribble_hedsafe': {'class': HEDdetector, 'checkpoint': True}, 'softedge_hedsafe': {'class': HEDdetector, 'checkpoint': True}, 'depth_midas': {'class': MidasDetector, 'checkpoint': True}, 'mlsd': {'class': MLSDdetector, 'checkpoint': True}, 'openpose': {'class': OpenposeDetector, 'checkpoint': True}, 'openpose_face': {'class': OpenposeDetector, 'checkpoint': True}, 'openpose_faceonly': {'class': OpenposeDetector, 'checkpoint': True}, 'openpose_full': {'class': OpenposeDetector, 'checkpoint': True}, 'openpose_hand': {'class': OpenposeDetector, 'checkpoint': True}, 'dwpose': {'class': DWposeDetector, 'checkpoint': True}, 'scribble_pidinet': {'class': PidiNetDetector, 'checkpoint': True}, 'softedge_pidinet': {'class': PidiNetDetector, 'checkpoint': True}, 'scribble_pidsafe': {'class': PidiNetDetector, 'checkpoint': True}, 'softedge_pidsafe': {'class': PidiNetDetector, 'checkpoint': True}, 'normal_bae': {'class': NormalBaeDetector, 'checkpoint': True}, 'lineart_coarse': {'class': LineartDetector, 'checkpoint': True}, 'lineart_realistic': {'class': LineartDetector, 'checkpoint': True}, 'lineart_anime': {'class': LineartAnimeDetector, 'checkpoint': True}, 'depth_zoe': {'class': ZoeDetector, 'checkpoint': True}, 'depth_leres': {'class': LeresDetector, 'checkpoint': True}, 'depth_leres++': {'class': LeresDetector, 'checkpoint': True}, 'shuffle': {'class': ContentShuffleDetector, 'checkpoint': False}, 'mediapipe_face': {'class': MediapipeFaceDetector, 'checkpoint': False}, 'canny': {'class': CannyDetector, 'checkpoint': False}} MODEL_PARAMS = {'scribble_hed': {'scribble': True}, 'softedge_hed': {'scribble': False}, 'scribble_hedsafe': {'scribble': True, 'safe': True}, 'softedge_hedsafe': {'scribble': False, 'safe': True}, 'depth_midas': {}, 'mlsd': {}, 'openpose': {'include_body': True, 'include_hand': False, 'include_face': False}, 'openpose_face': {'include_body': True, 'include_hand': False, 'include_face': True}, 'openpose_faceonly': {'include_body': False, 'include_hand': False, 'include_face': True}, 'openpose_full': {'include_body': True, 'include_hand': True, 'include_face': True}, 'openpose_hand': {'include_body': False, 'include_hand': True, 'include_face': False}, 'dwpose': {}, 'scribble_pidinet': {'safe': False, 'scribble': True}, 'softedge_pidinet': {'safe': False, 'scribble': False}, 'scribble_pidsafe': {'safe': True, 'scribble': True}, 'softedge_pidsafe': {'safe': True, 'scribble': False}, 'normal_bae': {}, 'lineart_realistic': {'coarse': False}, 'lineart_coarse': {'coarse': True}, 'lineart_anime': {}, 'canny': {}, 'shuffle': {}, 'depth_zoe': {}, 'depth_leres': {'boost': False}, 'depth_leres++': {'boost': True}, 'mediapipe_face': {}} CHOICES = f'Choices for the processor are {list(MODELS.keys())}' class Processor: def __init__(self, processor_id: str, params: Optional[Dict]=None) -> None: LOGGER.info(f'Loading {processor_id}') if processor_id not in MODELS: raise ValueError(f"{processor_id} is not a valid processor id. Please make sure to choose one of {', '.join(MODELS.keys())}") self.processor_id = processor_id self.processor = self.load_processor(self.processor_id) self.params = MODEL_PARAMS[self.processor_id] if params: self.params.update(params) def load_processor(self, processor_id: str) -> 'Processor': processor = MODELS[processor_id]['class'] if MODELS[processor_id]['checkpoint']: processor = processor.from_pretrained('lllyasviel/Annotators') else: processor = processor() return processor def __call__(self, image: Union[Image.Image, bytes], to_pil: bool=True) -> Union[Image.Image, bytes]: if isinstance(image, bytes): image = Image.open(io.BytesIO(image)).convert('RGB') processed_image = self.processor(image, **self.params) if to_pil: return processed_image else: output_bytes = io.BytesIO() processed_image.save(output_bytes, format='JPEG') return output_bytes.getvalue() # File: controlnet_aux-master/src/controlnet_aux/segment_anything/__init__.py import os import warnings from typing import Union import cv2 import numpy as np import torch from huggingface_hub import hf_hub_download from PIL import Image from ..util import HWC3, resize_image from .automatic_mask_generator import SamAutomaticMaskGenerator from .build_sam import sam_model_registry class SamDetector: def __init__(self, mask_generator: SamAutomaticMaskGenerator): self.mask_generator = mask_generator @classmethod def from_pretrained(cls, pretrained_model_or_path, model_type='vit_h', filename='sam_vit_h_4b8939.pth', subfolder=None, cache_dir=None): if os.path.isdir(pretrained_model_or_path): model_path = os.path.join(pretrained_model_or_path, filename) else: model_path = hf_hub_download(pretrained_model_or_path, filename, subfolder=subfolder, cache_dir=cache_dir) sam = sam_model_registry[model_type](checkpoint=model_path) if torch.cuda.is_available(): sam.to('cuda') mask_generator = SamAutomaticMaskGenerator(sam) return cls(mask_generator) def show_anns(self, anns): if len(anns) == 0: return sorted_anns = sorted(anns, key=lambda x: x['area'], reverse=True) (h, w) = anns[0]['segmentation'].shape final_img = Image.fromarray(np.zeros((h, w, 3), dtype=np.uint8), mode='RGB') for ann in sorted_anns: m = ann['segmentation'] img = np.empty((m.shape[0], m.shape[1], 3), dtype=np.uint8) for i in range(3): img[:, :, i] = np.random.randint(255, dtype=np.uint8) final_img.paste(Image.fromarray(img, mode='RGB'), (0, 0), Image.fromarray(np.uint8(m * 255))) return np.array(final_img, dtype=np.uint8) def __call__(self, input_image: Union[np.ndarray, Image.Image]=None, detect_resolution=512, image_resolution=512, output_type='pil', **kwargs) -> Image.Image: if 'image' in kwargs: warnings.warn('image is deprecated, please use `input_image=...` instead.', DeprecationWarning) input_image = kwargs.pop('image') if input_image is None: raise ValueError('input_image must be defined.') if not isinstance(input_image, np.ndarray): input_image = np.array(input_image, dtype=np.uint8) input_image = HWC3(input_image) input_image = resize_image(input_image, detect_resolution) masks = self.mask_generator.generate(input_image) map = self.show_anns(masks) detected_map = map detected_map = HWC3(detected_map) img = resize_image(input_image, image_resolution) (H, W, C) = img.shape detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR) if output_type == 'pil': detected_map = Image.fromarray(detected_map) return detected_map # File: controlnet_aux-master/src/controlnet_aux/segment_anything/automatic_mask_generator.py import numpy as np import torch from torchvision.ops.boxes import batched_nms, box_area from typing import Any, Dict, List, Optional, Tuple from .modeling import Sam from .predictor import SamPredictor from .utils.amg import MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points class SamAutomaticMaskGenerator: def __init__(self, model: Sam, points_per_side: Optional[int]=32, points_per_batch: int=64, pred_iou_thresh: float=0.88, stability_score_thresh: float=0.95, stability_score_offset: float=1.0, box_nms_thresh: float=0.7, crop_n_layers: int=0, crop_nms_thresh: float=0.7, crop_overlap_ratio: float=512 / 1500, crop_n_points_downscale_factor: int=1, point_grids: Optional[List[np.ndarray]]=None, min_mask_region_area: int=0, output_mode: str='binary_mask') -> None: assert (points_per_side is None) != (point_grids is None), 'Exactly one of points_per_side or point_grid must be provided.' if points_per_side is not None: self.point_grids = build_all_layer_point_grids(points_per_side, crop_n_layers, crop_n_points_downscale_factor) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in ['binary_mask', 'uncompressed_rle', 'coco_rle'], f'Unknown output_mode {output_mode}.' if output_mode == 'coco_rle': from pycocotools import mask as mask_utils if min_mask_region_area > 0: import cv2 self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: mask_data = self._generate_masks(image) if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions(mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh)) if self.output_mode == 'coco_rle': mask_data['segmentations'] = [coco_encode_rle(rle) for rle in mask_data['rles']] elif self.output_mode == 'binary_mask': mask_data['segmentations'] = [rle_to_mask(rle) for rle in mask_data['rles']] else: mask_data['segmentations'] = mask_data['rles'] curr_anns = [] for idx in range(len(mask_data['segmentations'])): ann = {'segmentation': mask_data['segmentations'][idx], 'area': area_from_rle(mask_data['rles'][idx]), 'bbox': box_xyxy_to_xywh(mask_data['boxes'][idx]).tolist(), 'predicted_iou': mask_data['iou_preds'][idx].item(), 'point_coords': [mask_data['points'][idx].tolist()], 'stability_score': mask_data['stability_score'][idx].item(), 'crop_box': box_xyxy_to_xywh(mask_data['crop_boxes'][idx]).tolist()} curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] (crop_boxes, layer_idxs) = generate_crop_boxes(orig_size, self.crop_n_layers, self.crop_overlap_ratio) data = MaskData() for (crop_box, layer_idx) in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) if len(crop_boxes) > 1: scores = 1 / box_area(data['crop_boxes']) scores = scores.to(data['boxes'].device) keep_by_nms = batched_nms(data['boxes'].float(), scores, torch.zeros_like(data['boxes'][:, 0]), iou_threshold=self.crop_nms_thresh) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop(self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...]) -> MaskData: (x0, y0, x1, y1) = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() keep_by_nms = batched_nms(data['boxes'].float(), data['iou_preds'], torch.zeros_like(data['boxes'][:, 0]), iou_threshold=self.box_nms_thresh) data.filter(keep_by_nms) data['boxes'] = uncrop_boxes_xyxy(data['boxes'], crop_box) data['points'] = uncrop_points(data['points'], crop_box) data['crop_boxes'] = torch.tensor([crop_box for _ in range(len(data['rles']))]) return data def _process_batch(self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...]) -> MaskData: (orig_h, orig_w) = orig_size transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) (masks, iou_preds, _) = self.predictor.predict_torch(in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True) data = MaskData(masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0))) del masks if self.pred_iou_thresh > 0.0: keep_mask = data['iou_preds'] > self.pred_iou_thresh data.filter(keep_mask) data['stability_score'] = calculate_stability_score(data['masks'], self.predictor.model.mask_threshold, self.stability_score_offset) if self.stability_score_thresh > 0.0: keep_mask = data['stability_score'] >= self.stability_score_thresh data.filter(keep_mask) data['masks'] = data['masks'] > self.predictor.model.mask_threshold data['boxes'] = batched_mask_to_box(data['masks']) keep_mask = ~is_box_near_crop_edge(data['boxes'], crop_box, [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): data.filter(keep_mask) data['masks'] = uncrop_masks(data['masks'], crop_box, orig_h, orig_w) data['rles'] = mask_to_rle_pytorch(data['masks']) del data['masks'] return data @staticmethod def postprocess_small_regions(mask_data: MaskData, min_area: int, nms_thresh: float) -> MaskData: if len(mask_data['rles']) == 0: return mask_data new_masks = [] scores = [] for rle in mask_data['rles']: mask = rle_to_mask(rle) (mask, changed) = remove_small_regions(mask, min_area, mode='holes') unchanged = not changed (mask, changed) = remove_small_regions(mask, min_area, mode='islands') unchanged = unchanged and (not changed) new_masks.append(torch.as_tensor(mask).unsqueeze(0)) scores.append(float(unchanged)) masks = torch.cat(new_masks, dim=0) boxes = batched_mask_to_box(masks) keep_by_nms = batched_nms(boxes.float(), torch.as_tensor(scores), torch.zeros_like(boxes[:, 0]), iou_threshold=nms_thresh) for i_mask in keep_by_nms: if scores[i_mask] == 0.0: mask_torch = masks[i_mask].unsqueeze(0) mask_data['rles'][i_mask] = mask_to_rle_pytorch(mask_torch)[0] mask_data['boxes'][i_mask] = boxes[i_mask] mask_data.filter(keep_by_nms) return mask_data # File: controlnet_aux-master/src/controlnet_aux/segment_anything/build_sam.py import torch from functools import partial from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer, TinyViT def build_sam_vit_h(checkpoint=None): return _build_sam(encoder_embed_dim=1280, encoder_depth=32, encoder_num_heads=16, encoder_global_attn_indexes=[7, 15, 23, 31], checkpoint=checkpoint) build_sam = build_sam_vit_h def build_sam_vit_l(checkpoint=None): return _build_sam(encoder_embed_dim=1024, encoder_depth=24, encoder_num_heads=16, encoder_global_attn_indexes=[5, 11, 17, 23], checkpoint=checkpoint) def build_sam_vit_b(checkpoint=None): return _build_sam(encoder_embed_dim=768, encoder_depth=12, encoder_num_heads=12, encoder_global_attn_indexes=[2, 5, 8, 11], checkpoint=checkpoint) def build_sam_vit_t(checkpoint=None): prompt_embed_dim = 256 image_size = 1024 vit_patch_size = 16 image_embedding_size = image_size // vit_patch_size mobile_sam = Sam(image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000, embed_dims=[64, 128, 160, 320], depths=[2, 2, 6, 2], num_heads=[2, 4, 5, 10], window_sizes=[7, 7, 14, 7], mlp_ratio=4.0, drop_rate=0.0, drop_path_rate=0.0, use_checkpoint=False, mbconv_expand_ratio=4.0, local_conv_size=3, layer_lr_decay=0.8), prompt_encoder=PromptEncoder(embed_dim=prompt_embed_dim, image_embedding_size=(image_embedding_size, image_embedding_size), input_image_size=(image_size, image_size), mask_in_chans=16), mask_decoder=MaskDecoder(num_multimask_outputs=3, transformer=TwoWayTransformer(depth=2, embedding_dim=prompt_embed_dim, mlp_dim=2048, num_heads=8), transformer_dim=prompt_embed_dim, iou_head_depth=3, iou_head_hidden_dim=256), pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375]) mobile_sam.eval() if checkpoint is not None: with open(checkpoint, 'rb') as f: state_dict = torch.load(f) mobile_sam.load_state_dict(state_dict) return mobile_sam sam_model_registry = {'default': build_sam_vit_h, 'vit_h': build_sam_vit_h, 'vit_l': build_sam_vit_l, 'vit_b': build_sam_vit_b, 'vit_t': build_sam_vit_t} def _build_sam(encoder_embed_dim, encoder_depth, encoder_num_heads, encoder_global_attn_indexes, checkpoint=None): prompt_embed_dim = 256 image_size = 1024 vit_patch_size = 16 image_embedding_size = image_size // vit_patch_size sam = Sam(image_encoder=ImageEncoderViT(depth=encoder_depth, embed_dim=encoder_embed_dim, img_size=image_size, mlp_ratio=4, norm_layer=partial(torch.nn.LayerNorm, eps=1e-06), num_heads=encoder_num_heads, patch_size=vit_patch_size, qkv_bias=True, use_rel_pos=True, global_attn_indexes=encoder_global_attn_indexes, window_size=14, out_chans=prompt_embed_dim), prompt_encoder=PromptEncoder(embed_dim=prompt_embed_dim, image_embedding_size=(image_embedding_size, image_embedding_size), input_image_size=(image_size, image_size), mask_in_chans=16), mask_decoder=MaskDecoder(num_multimask_outputs=3, transformer=TwoWayTransformer(depth=2, embedding_dim=prompt_embed_dim, mlp_dim=2048, num_heads=8), transformer_dim=prompt_embed_dim, iou_head_depth=3, iou_head_hidden_dim=256), pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375]) sam.eval() if checkpoint is not None: with open(checkpoint, 'rb') as f: state_dict = torch.load(f) sam.load_state_dict(state_dict) return sam # File: controlnet_aux-master/src/controlnet_aux/segment_anything/modeling/common.py import torch import torch.nn as nn from typing import Type class MLPBlock(nn.Module): def __init__(self, embedding_dim: int, mlp_dim: int, act: Type[nn.Module]=nn.GELU) -> None: super().__init__() self.lin1 = nn.Linear(embedding_dim, mlp_dim) self.lin2 = nn.Linear(mlp_dim, embedding_dim) self.act = act() def forward(self, x: torch.Tensor) -> torch.Tensor: return self.lin2(self.act(self.lin1(x))) class LayerNorm2d(nn.Module): def __init__(self, num_channels: int, eps: float=1e-06) -> None: super().__init__() self.weight = nn.Parameter(torch.ones(num_channels)) self.bias = nn.Parameter(torch.zeros(num_channels)) self.eps = eps def forward(self, x: torch.Tensor) -> torch.Tensor: u = x.mean(1, keepdim=True) s = (x - u).pow(2).mean(1, keepdim=True) x = (x - u) / torch.sqrt(s + self.eps) x = self.weight[:, None, None] * x + self.bias[:, None, None] return x # File: controlnet_aux-master/src/controlnet_aux/segment_anything/modeling/image_encoder.py import torch import torch.nn as nn import torch.nn.functional as F from typing import Optional, Tuple, Type from .common import LayerNorm2d, MLPBlock class ImageEncoderViT(nn.Module): def __init__(self, img_size: int=1024, patch_size: int=16, in_chans: int=3, embed_dim: int=768, depth: int=12, num_heads: int=12, mlp_ratio: float=4.0, out_chans: int=256, qkv_bias: bool=True, norm_layer: Type[nn.Module]=nn.LayerNorm, act_layer: Type[nn.Module]=nn.GELU, use_abs_pos: bool=True, use_rel_pos: bool=False, rel_pos_zero_init: bool=True, window_size: int=0, global_attn_indexes: Tuple[int, ...]=()) -> None: super().__init__() self.img_size = img_size self.patch_embed = PatchEmbed(kernel_size=(patch_size, patch_size), stride=(patch_size, patch_size), in_chans=in_chans, embed_dim=embed_dim) self.pos_embed: Optional[nn.Parameter] = None if use_abs_pos: self.pos_embed = nn.Parameter(torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)) self.blocks = nn.ModuleList() for i in range(depth): block = Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, norm_layer=norm_layer, act_layer=act_layer, use_rel_pos=use_rel_pos, rel_pos_zero_init=rel_pos_zero_init, window_size=window_size if i not in global_attn_indexes else 0, input_size=(img_size // patch_size, img_size // patch_size)) self.blocks.append(block) self.neck = nn.Sequential(nn.Conv2d(embed_dim, out_chans, kernel_size=1, bias=False), LayerNorm2d(out_chans), nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1, bias=False), LayerNorm2d(out_chans)) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.patch_embed(x) if self.pos_embed is not None: x = x + self.pos_embed for blk in self.blocks: x = blk(x) x = self.neck(x.permute(0, 3, 1, 2)) return x class Block(nn.Module): def __init__(self, dim: int, num_heads: int, mlp_ratio: float=4.0, qkv_bias: bool=True, norm_layer: Type[nn.Module]=nn.LayerNorm, act_layer: Type[nn.Module]=nn.GELU, use_rel_pos: bool=False, rel_pos_zero_init: bool=True, window_size: int=0, input_size: Optional[Tuple[int, int]]=None) -> None: super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, use_rel_pos=use_rel_pos, rel_pos_zero_init=rel_pos_zero_init, input_size=input_size if window_size == 0 else (window_size, window_size)) self.norm2 = norm_layer(dim) self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer) self.window_size = window_size def forward(self, x: torch.Tensor) -> torch.Tensor: shortcut = x x = self.norm1(x) if self.window_size > 0: (H, W) = (x.shape[1], x.shape[2]) (x, pad_hw) = window_partition(x, self.window_size) x = self.attn(x) if self.window_size > 0: x = window_unpartition(x, self.window_size, pad_hw, (H, W)) x = shortcut + x x = x + self.mlp(self.norm2(x)) return x class Attention(nn.Module): def __init__(self, dim: int, num_heads: int=8, qkv_bias: bool=True, use_rel_pos: bool=False, rel_pos_zero_init: bool=True, input_size: Optional[Tuple[int, int]]=None) -> None: super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** (-0.5) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.proj = nn.Linear(dim, dim) self.use_rel_pos = use_rel_pos if self.use_rel_pos: assert input_size is not None, 'Input size must be provided if using relative positional encoding.' self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim)) def forward(self, x: torch.Tensor) -> torch.Tensor: (B, H, W, _) = x.shape qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0) attn = q * self.scale @ k.transpose(-2, -1) if self.use_rel_pos: attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W)) attn = attn.softmax(dim=-1) x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1) x = self.proj(x) return x def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]: (B, H, W, C) = x.shape pad_h = (window_size - H % window_size) % window_size pad_w = (window_size - W % window_size) % window_size if pad_h > 0 or pad_w > 0: x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) (Hp, Wp) = (H + pad_h, W + pad_w) x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) return (windows, (Hp, Wp)) def window_unpartition(windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int]) -> torch.Tensor: (Hp, Wp) = pad_hw (H, W) = hw B = windows.shape[0] // (Hp * Wp // window_size // window_size) x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) if Hp > H or Wp > W: x = x[:, :H, :W, :].contiguous() return x def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: max_rel_dist = int(2 * max(q_size, k_size) - 1) if rel_pos.shape[0] != max_rel_dist: rel_pos_resized = F.interpolate(rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode='linear') rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) else: rel_pos_resized = rel_pos q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) relative_coords = q_coords - k_coords + (k_size - 1) * max(q_size / k_size, 1.0) return rel_pos_resized[relative_coords.long()] def add_decomposed_rel_pos(attn: torch.Tensor, q: torch.Tensor, rel_pos_h: torch.Tensor, rel_pos_w: torch.Tensor, q_size: Tuple[int, int], k_size: Tuple[int, int]) -> torch.Tensor: (q_h, q_w) = q_size (k_h, k_w) = k_size Rh = get_rel_pos(q_h, k_h, rel_pos_h) Rw = get_rel_pos(q_w, k_w, rel_pos_w) (B, _, dim) = q.shape r_q = q.reshape(B, q_h, q_w, dim) rel_h = torch.einsum('bhwc,hkc->bhwk', r_q, Rh) rel_w = torch.einsum('bhwc,wkc->bhwk', r_q, Rw) attn = (attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]).view(B, q_h * q_w, k_h * k_w) return attn class PatchEmbed(nn.Module): def __init__(self, kernel_size: Tuple[int, int]=(16, 16), stride: Tuple[int, int]=(16, 16), padding: Tuple[int, int]=(0, 0), in_chans: int=3, embed_dim: int=768) -> None: super().__init__() self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.proj(x) x = x.permute(0, 2, 3, 1) return x # File: controlnet_aux-master/src/controlnet_aux/segment_anything/modeling/mask_decoder.py import torch from torch import nn from torch.nn import functional as F from typing import List, Tuple, Type from .common import LayerNorm2d class MaskDecoder(nn.Module): def __init__(self, *, transformer_dim: int, transformer: nn.Module, num_multimask_outputs: int=3, activation: Type[nn.Module]=nn.GELU, iou_head_depth: int=3, iou_head_hidden_dim: int=256) -> None: super().__init__() self.transformer_dim = transformer_dim self.transformer = transformer self.num_multimask_outputs = num_multimask_outputs self.iou_token = nn.Embedding(1, transformer_dim) self.num_mask_tokens = num_multimask_outputs + 1 self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim) self.output_upscaling = nn.Sequential(nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2), LayerNorm2d(transformer_dim // 4), activation(), nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2), activation()) self.output_hypernetworks_mlps = nn.ModuleList([MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) for i in range(self.num_mask_tokens)]) self.iou_prediction_head = MLP(transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth) def forward(self, image_embeddings: torch.Tensor, image_pe: torch.Tensor, sparse_prompt_embeddings: torch.Tensor, dense_prompt_embeddings: torch.Tensor, multimask_output: bool) -> Tuple[torch.Tensor, torch.Tensor]: (masks, iou_pred) = self.predict_masks(image_embeddings=image_embeddings, image_pe=image_pe, sparse_prompt_embeddings=sparse_prompt_embeddings, dense_prompt_embeddings=dense_prompt_embeddings) if multimask_output: mask_slice = slice(1, None) else: mask_slice = slice(0, 1) masks = masks[:, mask_slice, :, :] iou_pred = iou_pred[:, mask_slice] return (masks, iou_pred) def predict_masks(self, image_embeddings: torch.Tensor, image_pe: torch.Tensor, sparse_prompt_embeddings: torch.Tensor, dense_prompt_embeddings: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0) output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1) tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1) src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0) src = src + dense_prompt_embeddings pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0) (b, c, h, w) = src.shape (hs, src) = self.transformer(src, pos_src, tokens) iou_token_out = hs[:, 0, :] mask_tokens_out = hs[:, 1:1 + self.num_mask_tokens, :] src = src.transpose(1, 2).view(b, c, h, w) upscaled_embedding = self.output_upscaling(src) hyper_in_list: List[torch.Tensor] = [] for i in range(self.num_mask_tokens): hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :])) hyper_in = torch.stack(hyper_in_list, dim=1) (b, c, h, w) = upscaled_embedding.shape masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w) iou_pred = self.iou_prediction_head(iou_token_out) return (masks, iou_pred) class MLP(nn.Module): def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int, sigmoid_output: bool=False) -> None: super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList((nn.Linear(n, k) for (n, k) in zip([input_dim] + h, h + [output_dim]))) self.sigmoid_output = sigmoid_output def forward(self, x): for (i, layer) in enumerate(self.layers): x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) if self.sigmoid_output: x = F.sigmoid(x) return x # File: controlnet_aux-master/src/controlnet_aux/segment_anything/modeling/prompt_encoder.py import numpy as np import torch from torch import nn from typing import Any, Optional, Tuple, Type from .common import LayerNorm2d class PromptEncoder(nn.Module): def __init__(self, embed_dim: int, image_embedding_size: Tuple[int, int], input_image_size: Tuple[int, int], mask_in_chans: int, activation: Type[nn.Module]=nn.GELU) -> None: super().__init__() self.embed_dim = embed_dim self.input_image_size = input_image_size self.image_embedding_size = image_embedding_size self.pe_layer = PositionEmbeddingRandom(embed_dim // 2) self.num_point_embeddings: int = 4 point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)] self.point_embeddings = nn.ModuleList(point_embeddings) self.not_a_point_embed = nn.Embedding(1, embed_dim) self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1]) self.mask_downscaling = nn.Sequential(nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2), LayerNorm2d(mask_in_chans // 4), activation(), nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2), LayerNorm2d(mask_in_chans), activation(), nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1)) self.no_mask_embed = nn.Embedding(1, embed_dim) def get_dense_pe(self) -> torch.Tensor: return self.pe_layer(self.image_embedding_size).unsqueeze(0) def _embed_points(self, points: torch.Tensor, labels: torch.Tensor, pad: bool) -> torch.Tensor: points = points + 0.5 if pad: padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device) padding_label = -torch.ones((labels.shape[0], 1), device=labels.device) points = torch.cat([points, padding_point], dim=1) labels = torch.cat([labels, padding_label], dim=1) point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size) point_embedding[labels == -1] = 0.0 point_embedding[labels == -1] += self.not_a_point_embed.weight point_embedding[labels == 0] += self.point_embeddings[0].weight point_embedding[labels == 1] += self.point_embeddings[1].weight return point_embedding def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor: boxes = boxes + 0.5 coords = boxes.reshape(-1, 2, 2) corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size) corner_embedding[:, 0, :] += self.point_embeddings[2].weight corner_embedding[:, 1, :] += self.point_embeddings[3].weight return corner_embedding def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor: mask_embedding = self.mask_downscaling(masks) return mask_embedding def _get_batch_size(self, points: Optional[Tuple[torch.Tensor, torch.Tensor]], boxes: Optional[torch.Tensor], masks: Optional[torch.Tensor]) -> int: if points is not None: return points[0].shape[0] elif boxes is not None: return boxes.shape[0] elif masks is not None: return masks.shape[0] else: return 1 def _get_device(self) -> torch.device: return self.point_embeddings[0].weight.device def forward(self, points: Optional[Tuple[torch.Tensor, torch.Tensor]], boxes: Optional[torch.Tensor], masks: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: bs = self._get_batch_size(points, boxes, masks) sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device()) if points is not None: (coords, labels) = points point_embeddings = self._embed_points(coords, labels, pad=boxes is None) sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1) if boxes is not None: box_embeddings = self._embed_boxes(boxes) sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1) if masks is not None: dense_embeddings = self._embed_masks(masks) else: dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]) return (sparse_embeddings, dense_embeddings) class PositionEmbeddingRandom(nn.Module): def __init__(self, num_pos_feats: int=64, scale: Optional[float]=None) -> None: super().__init__() if scale is None or scale <= 0.0: scale = 1.0 self.register_buffer('positional_encoding_gaussian_matrix', scale * torch.randn((2, num_pos_feats))) def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor: coords = 2 * coords - 1 coords = coords @ self.positional_encoding_gaussian_matrix coords = 2 * np.pi * coords return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1) def forward(self, size: Tuple[int, int]) -> torch.Tensor: (h, w) = size device: Any = self.positional_encoding_gaussian_matrix.device grid = torch.ones((h, w), device=device, dtype=torch.float32) y_embed = grid.cumsum(dim=0) - 0.5 x_embed = grid.cumsum(dim=1) - 0.5 y_embed = y_embed / h x_embed = x_embed / w pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1)) return pe.permute(2, 0, 1) def forward_with_coords(self, coords_input: torch.Tensor, image_size: Tuple[int, int]) -> torch.Tensor: coords = coords_input.clone() coords[:, :, 0] = coords[:, :, 0] / image_size[1] coords[:, :, 1] = coords[:, :, 1] / image_size[0] return self._pe_encoding(coords.to(torch.float)) # File: controlnet_aux-master/src/controlnet_aux/segment_anything/modeling/sam.py import torch from torch import nn from torch.nn import functional as F from typing import Any, Dict, List, Tuple, Union from .tiny_vit_sam import TinyViT from .image_encoder import ImageEncoderViT from .mask_decoder import MaskDecoder from .prompt_encoder import PromptEncoder class Sam(nn.Module): mask_threshold: float = 0.0 image_format: str = 'RGB' def __init__(self, image_encoder: Union[ImageEncoderViT, TinyViT], prompt_encoder: PromptEncoder, mask_decoder: MaskDecoder, pixel_mean: List[float]=[123.675, 116.28, 103.53], pixel_std: List[float]=[58.395, 57.12, 57.375]) -> None: super().__init__() self.image_encoder = image_encoder self.prompt_encoder = prompt_encoder self.mask_decoder = mask_decoder self.register_buffer('pixel_mean', torch.Tensor(pixel_mean).view(-1, 1, 1), False) self.register_buffer('pixel_std', torch.Tensor(pixel_std).view(-1, 1, 1), False) @property def device(self) -> Any: return self.pixel_mean.device @torch.no_grad() def forward(self, batched_input: List[Dict[str, Any]], multimask_output: bool) -> List[Dict[str, torch.Tensor]]: input_images = torch.stack([self.preprocess(x['image']) for x in batched_input], dim=0) image_embeddings = self.image_encoder(input_images) outputs = [] for (image_record, curr_embedding) in zip(batched_input, image_embeddings): if 'point_coords' in image_record: points = (image_record['point_coords'], image_record['point_labels']) else: points = None (sparse_embeddings, dense_embeddings) = self.prompt_encoder(points=points, boxes=image_record.get('boxes', None), masks=image_record.get('mask_inputs', None)) (low_res_masks, iou_predictions) = self.mask_decoder(image_embeddings=curr_embedding.unsqueeze(0), image_pe=self.prompt_encoder.get_dense_pe(), sparse_prompt_embeddings=sparse_embeddings, dense_prompt_embeddings=dense_embeddings, multimask_output=multimask_output) masks = self.postprocess_masks(low_res_masks, input_size=image_record['image'].shape[-2:], original_size=image_record['original_size']) masks = masks > self.mask_threshold outputs.append({'masks': masks, 'iou_predictions': iou_predictions, 'low_res_logits': low_res_masks}) return outputs def postprocess_masks(self, masks: torch.Tensor, input_size: Tuple[int, ...], original_size: Tuple[int, ...]) -> torch.Tensor: masks = F.interpolate(masks, (self.image_encoder.img_size, self.image_encoder.img_size), mode='bilinear', align_corners=False) masks = masks[..., :input_size[0], :input_size[1]] masks = F.interpolate(masks, original_size, mode='bilinear', align_corners=False) return masks def preprocess(self, x: torch.Tensor) -> torch.Tensor: x = (x - self.pixel_mean) / self.pixel_std (h, w) = x.shape[-2:] padh = self.image_encoder.img_size - h padw = self.image_encoder.img_size - w x = F.pad(x, (0, padw, 0, padh)) return x # File: controlnet_aux-master/src/controlnet_aux/segment_anything/modeling/tiny_vit_sam.py import itertools import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.models.layers import DropPath as TimmDropPath, to_2tuple, trunc_normal_ from timm.models.registry import register_model from typing import Tuple class Conv2d_BN(torch.nn.Sequential): def __init__(self, a, b, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1): super().__init__() self.add_module('c', torch.nn.Conv2d(a, b, ks, stride, pad, dilation, groups, bias=False)) bn = torch.nn.BatchNorm2d(b) torch.nn.init.constant_(bn.weight, bn_weight_init) torch.nn.init.constant_(bn.bias, 0) self.add_module('bn', bn) @torch.no_grad() def fuse(self): (c, bn) = self._modules.values() w = bn.weight / (bn.running_var + bn.eps) ** 0.5 w = c.weight * w[:, None, None, None] b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 m = torch.nn.Conv2d(w.size(1) * self.c.groups, w.size(0), w.shape[2:], stride=self.c.stride, padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups) m.weight.data.copy_(w) m.bias.data.copy_(b) return m class DropPath(TimmDropPath): def __init__(self, drop_prob=None): super().__init__(drop_prob=drop_prob) self.drop_prob = drop_prob def __repr__(self): msg = super().__repr__() msg += f'(drop_prob={self.drop_prob})' return msg class PatchEmbed(nn.Module): def __init__(self, in_chans, embed_dim, resolution, activation): super().__init__() img_size: Tuple[int, int] = to_2tuple(resolution) self.patches_resolution = (img_size[0] // 4, img_size[1] // 4) self.num_patches = self.patches_resolution[0] * self.patches_resolution[1] self.in_chans = in_chans self.embed_dim = embed_dim n = embed_dim self.seq = nn.Sequential(Conv2d_BN(in_chans, n // 2, 3, 2, 1), activation(), Conv2d_BN(n // 2, n, 3, 2, 1)) def forward(self, x): return self.seq(x) class MBConv(nn.Module): def __init__(self, in_chans, out_chans, expand_ratio, activation, drop_path): super().__init__() self.in_chans = in_chans self.hidden_chans = int(in_chans * expand_ratio) self.out_chans = out_chans self.conv1 = Conv2d_BN(in_chans, self.hidden_chans, ks=1) self.act1 = activation() self.conv2 = Conv2d_BN(self.hidden_chans, self.hidden_chans, ks=3, stride=1, pad=1, groups=self.hidden_chans) self.act2 = activation() self.conv3 = Conv2d_BN(self.hidden_chans, out_chans, ks=1, bn_weight_init=0.0) self.act3 = activation() self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def forward(self, x): shortcut = x x = self.conv1(x) x = self.act1(x) x = self.conv2(x) x = self.act2(x) x = self.conv3(x) x = self.drop_path(x) x += shortcut x = self.act3(x) return x class PatchMerging(nn.Module): def __init__(self, input_resolution, dim, out_dim, activation): super().__init__() self.input_resolution = input_resolution self.dim = dim self.out_dim = out_dim self.act = activation() self.conv1 = Conv2d_BN(dim, out_dim, 1, 1, 0) stride_c = 2 if out_dim == 320 or out_dim == 448 or out_dim == 576: stride_c = 1 self.conv2 = Conv2d_BN(out_dim, out_dim, 3, stride_c, 1, groups=out_dim) self.conv3 = Conv2d_BN(out_dim, out_dim, 1, 1, 0) def forward(self, x): if x.ndim == 3: (H, W) = self.input_resolution B = len(x) x = x.view(B, H, W, -1).permute(0, 3, 1, 2) x = self.conv1(x) x = self.act(x) x = self.conv2(x) x = self.act(x) x = self.conv3(x) x = x.flatten(2).transpose(1, 2) return x class ConvLayer(nn.Module): def __init__(self, dim, input_resolution, depth, activation, drop_path=0.0, downsample=None, use_checkpoint=False, out_dim=None, conv_expand_ratio=4.0): super().__init__() self.dim = dim self.input_resolution = input_resolution self.depth = depth self.use_checkpoint = use_checkpoint self.blocks = nn.ModuleList([MBConv(dim, dim, conv_expand_ratio, activation, drop_path[i] if isinstance(drop_path, list) else drop_path) for i in range(depth)]) if downsample is not None: self.downsample = downsample(input_resolution, dim=dim, out_dim=out_dim, activation=activation) else: self.downsample = None def forward(self, x): for blk in self.blocks: if self.use_checkpoint: x = checkpoint.checkpoint(blk, x) else: x = blk(x) if self.downsample is not None: x = self.downsample(x) return x class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.norm = nn.LayerNorm(in_features) self.fc1 = nn.Linear(in_features, hidden_features) self.fc2 = nn.Linear(hidden_features, out_features) self.act = act_layer() self.drop = nn.Dropout(drop) def forward(self, x): x = self.norm(x) x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(torch.nn.Module): def __init__(self, dim, key_dim, num_heads=8, attn_ratio=4, resolution=(14, 14)): super().__init__() assert isinstance(resolution, tuple) and len(resolution) == 2 self.num_heads = num_heads self.scale = key_dim ** (-0.5) self.key_dim = key_dim self.nh_kd = nh_kd = key_dim * num_heads self.d = int(attn_ratio * key_dim) self.dh = int(attn_ratio * key_dim) * num_heads self.attn_ratio = attn_ratio h = self.dh + nh_kd * 2 self.norm = nn.LayerNorm(dim) self.qkv = nn.Linear(dim, h) self.proj = nn.Linear(self.dh, dim) points = list(itertools.product(range(resolution[0]), range(resolution[1]))) N = len(points) attention_offsets = {} idxs = [] for p1 in points: for p2 in points: offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) if offset not in attention_offsets: attention_offsets[offset] = len(attention_offsets) idxs.append(attention_offsets[offset]) self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets))) self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N), persistent=False) @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and hasattr(self, 'ab'): del self.ab else: self.ab = self.attention_biases[:, self.attention_bias_idxs] def forward(self, x): (B, N, _) = x.shape x = self.norm(x) qkv = self.qkv(x) (q, k, v) = qkv.view(B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.d], dim=3) q = q.permute(0, 2, 1, 3) k = k.permute(0, 2, 1, 3) v = v.permute(0, 2, 1, 3) attn = q @ k.transpose(-2, -1) * self.scale + (self.attention_biases[:, self.attention_bias_idxs] if self.training else self.ab) attn = attn.softmax(dim=-1) x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh) x = self.proj(x) return x class TinyViTBlock(nn.Module): def __init__(self, dim, input_resolution, num_heads, window_size=7, mlp_ratio=4.0, drop=0.0, drop_path=0.0, local_conv_size=3, activation=nn.GELU): super().__init__() self.dim = dim self.input_resolution = input_resolution self.num_heads = num_heads assert window_size > 0, 'window_size must be greater than 0' self.window_size = window_size self.mlp_ratio = mlp_ratio self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() assert dim % num_heads == 0, 'dim must be divisible by num_heads' head_dim = dim // num_heads window_resolution = (window_size, window_size) self.attn = Attention(dim, head_dim, num_heads, attn_ratio=1, resolution=window_resolution) mlp_hidden_dim = int(dim * mlp_ratio) mlp_activation = activation self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=mlp_activation, drop=drop) pad = local_conv_size // 2 self.local_conv = Conv2d_BN(dim, dim, ks=local_conv_size, stride=1, pad=pad, groups=dim) def forward(self, x): (H, W) = self.input_resolution (B, L, C) = x.shape assert L == H * W, 'input feature has wrong size' res_x = x if H == self.window_size and W == self.window_size: x = self.attn(x) else: x = x.view(B, H, W, C) pad_b = (self.window_size - H % self.window_size) % self.window_size pad_r = (self.window_size - W % self.window_size) % self.window_size padding = pad_b > 0 or pad_r > 0 if padding: x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b)) (pH, pW) = (H + pad_b, W + pad_r) nH = pH // self.window_size nW = pW // self.window_size x = x.view(B, nH, self.window_size, nW, self.window_size, C).transpose(2, 3).reshape(B * nH * nW, self.window_size * self.window_size, C) x = self.attn(x) x = x.view(B, nH, nW, self.window_size, self.window_size, C).transpose(2, 3).reshape(B, pH, pW, C) if padding: x = x[:, :H, :W].contiguous() x = x.view(B, L, C) x = res_x + self.drop_path(x) x = x.transpose(1, 2).reshape(B, C, H, W) x = self.local_conv(x) x = x.view(B, C, L).transpose(1, 2) x = x + self.drop_path(self.mlp(x)) return x def extra_repr(self) -> str: return f'dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, window_size={self.window_size}, mlp_ratio={self.mlp_ratio}' class BasicLayer(nn.Module): def __init__(self, dim, input_resolution, depth, num_heads, window_size, mlp_ratio=4.0, drop=0.0, drop_path=0.0, downsample=None, use_checkpoint=False, local_conv_size=3, activation=nn.GELU, out_dim=None): super().__init__() self.dim = dim self.input_resolution = input_resolution self.depth = depth self.use_checkpoint = use_checkpoint self.blocks = nn.ModuleList([TinyViTBlock(dim=dim, input_resolution=input_resolution, num_heads=num_heads, window_size=window_size, mlp_ratio=mlp_ratio, drop=drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, local_conv_size=local_conv_size, activation=activation) for i in range(depth)]) if downsample is not None: self.downsample = downsample(input_resolution, dim=dim, out_dim=out_dim, activation=activation) else: self.downsample = None def forward(self, x): for blk in self.blocks: if self.use_checkpoint: x = checkpoint.checkpoint(blk, x) else: x = blk(x) if self.downsample is not None: x = self.downsample(x) return x def extra_repr(self) -> str: return f'dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}' class LayerNorm2d(nn.Module): def __init__(self, num_channels: int, eps: float=1e-06) -> None: super().__init__() self.weight = nn.Parameter(torch.ones(num_channels)) self.bias = nn.Parameter(torch.zeros(num_channels)) self.eps = eps def forward(self, x: torch.Tensor) -> torch.Tensor: u = x.mean(1, keepdim=True) s = (x - u).pow(2).mean(1, keepdim=True) x = (x - u) / torch.sqrt(s + self.eps) x = self.weight[:, None, None] * x + self.bias[:, None, None] return x class TinyViT(nn.Module): def __init__(self, img_size=224, in_chans=3, num_classes=1000, embed_dims=[96, 192, 384, 768], depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_sizes=[7, 7, 14, 7], mlp_ratio=4.0, drop_rate=0.0, drop_path_rate=0.1, use_checkpoint=False, mbconv_expand_ratio=4.0, local_conv_size=3, layer_lr_decay=1.0): super().__init__() self.img_size = img_size self.num_classes = num_classes self.depths = depths self.num_layers = len(depths) self.mlp_ratio = mlp_ratio activation = nn.GELU self.patch_embed = PatchEmbed(in_chans=in_chans, embed_dim=embed_dims[0], resolution=img_size, activation=activation) patches_resolution = self.patch_embed.patches_resolution self.patches_resolution = patches_resolution dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] self.layers = nn.ModuleList() for i_layer in range(self.num_layers): kwargs = dict(dim=embed_dims[i_layer], input_resolution=(patches_resolution[0] // 2 ** (i_layer - 1 if i_layer == 3 else i_layer), patches_resolution[1] // 2 ** (i_layer - 1 if i_layer == 3 else i_layer)), depth=depths[i_layer], drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], downsample=PatchMerging if i_layer < self.num_layers - 1 else None, use_checkpoint=use_checkpoint, out_dim=embed_dims[min(i_layer + 1, len(embed_dims) - 1)], activation=activation) if i_layer == 0: layer = ConvLayer(conv_expand_ratio=mbconv_expand_ratio, **kwargs) else: layer = BasicLayer(num_heads=num_heads[i_layer], window_size=window_sizes[i_layer], mlp_ratio=self.mlp_ratio, drop=drop_rate, local_conv_size=local_conv_size, **kwargs) self.layers.append(layer) self.norm_head = nn.LayerNorm(embed_dims[-1]) self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else torch.nn.Identity() self.apply(self._init_weights) self.set_layer_lr_decay(layer_lr_decay) self.neck = nn.Sequential(nn.Conv2d(embed_dims[-1], 256, kernel_size=1, bias=False), LayerNorm2d(256), nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False), LayerNorm2d(256)) def set_layer_lr_decay(self, layer_lr_decay): decay_rate = layer_lr_decay depth = sum(self.depths) lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)] def _set_lr_scale(m, scale): for p in m.parameters(): p.lr_scale = scale self.patch_embed.apply(lambda x: _set_lr_scale(x, lr_scales[0])) i = 0 for layer in self.layers: for block in layer.blocks: block.apply(lambda x: _set_lr_scale(x, lr_scales[i])) i += 1 if layer.downsample is not None: layer.downsample.apply(lambda x: _set_lr_scale(x, lr_scales[i - 1])) assert i == depth for m in [self.norm_head, self.head]: m.apply(lambda x: _set_lr_scale(x, lr_scales[-1])) for (k, p) in self.named_parameters(): p.param_name = k def _check_lr_scale(m): for p in m.parameters(): assert hasattr(p, 'lr_scale'), p.param_name self.apply(_check_lr_scale) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay_keywords(self): return {'attention_biases'} def forward_features(self, x): x = self.patch_embed(x) x = self.layers[0](x) start_i = 1 for i in range(start_i, len(self.layers)): layer = self.layers[i] x = layer(x) (B, _, C) = x.size() x = x.view(B, 64, 64, C) x = x.permute(0, 3, 1, 2) x = self.neck(x) return x def forward(self, x): x = self.forward_features(x) return x _checkpoint_url_format = 'https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/{}.pth' _provided_checkpoints = {'tiny_vit_5m_224': 'tiny_vit_5m_22kto1k_distill', 'tiny_vit_11m_224': 'tiny_vit_11m_22kto1k_distill', 'tiny_vit_21m_224': 'tiny_vit_21m_22kto1k_distill', 'tiny_vit_21m_384': 'tiny_vit_21m_22kto1k_384_distill', 'tiny_vit_21m_512': 'tiny_vit_21m_22kto1k_512_distill'} def register_tiny_vit_model(fn): def fn_wrapper(pretrained=False, **kwargs): model = fn() if pretrained: model_name = fn.__name__ assert model_name in _provided_checkpoints, f'Sorry that the checkpoint `{model_name}` is not provided yet.' url = _checkpoint_url_format.format(_provided_checkpoints[model_name]) checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location='cpu', check_hash=False) model.load_state_dict(checkpoint['model']) return model fn_wrapper.__name__ = fn.__name__ return register_model(fn_wrapper) @register_tiny_vit_model def tiny_vit_5m_224(pretrained=False, num_classes=1000, drop_path_rate=0.0): return TinyViT(num_classes=num_classes, embed_dims=[64, 128, 160, 320], depths=[2, 2, 6, 2], num_heads=[2, 4, 5, 10], window_sizes=[7, 7, 14, 7], drop_path_rate=drop_path_rate) @register_tiny_vit_model def tiny_vit_11m_224(pretrained=False, num_classes=1000, drop_path_rate=0.1): return TinyViT(num_classes=num_classes, embed_dims=[64, 128, 256, 448], depths=[2, 2, 6, 2], num_heads=[2, 4, 8, 14], window_sizes=[7, 7, 14, 7], drop_path_rate=drop_path_rate) @register_tiny_vit_model def tiny_vit_21m_224(pretrained=False, num_classes=1000, drop_path_rate=0.2): return TinyViT(num_classes=num_classes, embed_dims=[96, 192, 384, 576], depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 18], window_sizes=[7, 7, 14, 7], drop_path_rate=drop_path_rate) @register_tiny_vit_model def tiny_vit_21m_384(pretrained=False, num_classes=1000, drop_path_rate=0.1): return TinyViT(img_size=384, num_classes=num_classes, embed_dims=[96, 192, 384, 576], depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 18], window_sizes=[12, 12, 24, 12], drop_path_rate=drop_path_rate) @register_tiny_vit_model def tiny_vit_21m_512(pretrained=False, num_classes=1000, drop_path_rate=0.1): return TinyViT(img_size=512, num_classes=num_classes, embed_dims=[96, 192, 384, 576], depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 18], window_sizes=[16, 16, 32, 16], drop_path_rate=drop_path_rate) # File: controlnet_aux-master/src/controlnet_aux/segment_anything/modeling/transformer.py import torch from torch import Tensor, nn import math from typing import Tuple, Type from .common import MLPBlock class TwoWayTransformer(nn.Module): def __init__(self, depth: int, embedding_dim: int, num_heads: int, mlp_dim: int, activation: Type[nn.Module]=nn.ReLU, attention_downsample_rate: int=2) -> None: super().__init__() self.depth = depth self.embedding_dim = embedding_dim self.num_heads = num_heads self.mlp_dim = mlp_dim self.layers = nn.ModuleList() for i in range(depth): self.layers.append(TwoWayAttentionBlock(embedding_dim=embedding_dim, num_heads=num_heads, mlp_dim=mlp_dim, activation=activation, attention_downsample_rate=attention_downsample_rate, skip_first_layer_pe=i == 0)) self.final_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate) self.norm_final_attn = nn.LayerNorm(embedding_dim) def forward(self, image_embedding: Tensor, image_pe: Tensor, point_embedding: Tensor) -> Tuple[Tensor, Tensor]: (bs, c, h, w) = image_embedding.shape image_embedding = image_embedding.flatten(2).permute(0, 2, 1) image_pe = image_pe.flatten(2).permute(0, 2, 1) queries = point_embedding keys = image_embedding for layer in self.layers: (queries, keys) = layer(queries=queries, keys=keys, query_pe=point_embedding, key_pe=image_pe) q = queries + point_embedding k = keys + image_pe attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys) queries = queries + attn_out queries = self.norm_final_attn(queries) return (queries, keys) class TwoWayAttentionBlock(nn.Module): def __init__(self, embedding_dim: int, num_heads: int, mlp_dim: int=2048, activation: Type[nn.Module]=nn.ReLU, attention_downsample_rate: int=2, skip_first_layer_pe: bool=False) -> None: super().__init__() self.self_attn = Attention(embedding_dim, num_heads) self.norm1 = nn.LayerNorm(embedding_dim) self.cross_attn_token_to_image = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate) self.norm2 = nn.LayerNorm(embedding_dim) self.mlp = MLPBlock(embedding_dim, mlp_dim, activation) self.norm3 = nn.LayerNorm(embedding_dim) self.norm4 = nn.LayerNorm(embedding_dim) self.cross_attn_image_to_token = Attention(embedding_dim, num_heads, downsample_rate=attention_downsample_rate) self.skip_first_layer_pe = skip_first_layer_pe def forward(self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor) -> Tuple[Tensor, Tensor]: if self.skip_first_layer_pe: queries = self.self_attn(q=queries, k=queries, v=queries) else: q = queries + query_pe attn_out = self.self_attn(q=q, k=q, v=queries) queries = queries + attn_out queries = self.norm1(queries) q = queries + query_pe k = keys + key_pe attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys) queries = queries + attn_out queries = self.norm2(queries) mlp_out = self.mlp(queries) queries = queries + mlp_out queries = self.norm3(queries) q = queries + query_pe k = keys + key_pe attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries) keys = keys + attn_out keys = self.norm4(keys) return (queries, keys) class Attention(nn.Module): def __init__(self, embedding_dim: int, num_heads: int, downsample_rate: int=1) -> None: super().__init__() self.embedding_dim = embedding_dim self.internal_dim = embedding_dim // downsample_rate self.num_heads = num_heads assert self.internal_dim % num_heads == 0, 'num_heads must divide embedding_dim.' self.q_proj = nn.Linear(embedding_dim, self.internal_dim) self.k_proj = nn.Linear(embedding_dim, self.internal_dim) self.v_proj = nn.Linear(embedding_dim, self.internal_dim) self.out_proj = nn.Linear(self.internal_dim, embedding_dim) def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor: (b, n, c) = x.shape x = x.reshape(b, n, num_heads, c // num_heads) return x.transpose(1, 2) def _recombine_heads(self, x: Tensor) -> Tensor: (b, n_heads, n_tokens, c_per_head) = x.shape x = x.transpose(1, 2) return x.reshape(b, n_tokens, n_heads * c_per_head) def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: q = self.q_proj(q) k = self.k_proj(k) v = self.v_proj(v) q = self._separate_heads(q, self.num_heads) k = self._separate_heads(k, self.num_heads) v = self._separate_heads(v, self.num_heads) (_, _, _, c_per_head) = q.shape attn = q @ k.permute(0, 1, 3, 2) attn = attn / math.sqrt(c_per_head) attn = torch.softmax(attn, dim=-1) out = attn @ v out = self._recombine_heads(out) out = self.out_proj(out) return out # File: controlnet_aux-master/src/controlnet_aux/segment_anything/predictor.py import numpy as np import torch from .modeling import Sam from typing import Optional, Tuple from .utils.transforms import ResizeLongestSide class SamPredictor: def __init__(self, sam_model: Sam) -> None: super().__init__() self.model = sam_model self.transform = ResizeLongestSide(sam_model.image_encoder.img_size) self.reset_image() def set_image(self, image: np.ndarray, image_format: str='RGB') -> None: assert image_format in ['RGB', 'BGR'], f"image_format must be in ['RGB', 'BGR'], is {image_format}." if image_format != self.model.image_format: image = image[..., ::-1] input_image = self.transform.apply_image(image) input_image_torch = torch.as_tensor(input_image, device=self.device) input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :] self.set_torch_image(input_image_torch, image.shape[:2]) @torch.no_grad() def set_torch_image(self, transformed_image: torch.Tensor, original_image_size: Tuple[int, ...]) -> None: assert len(transformed_image.shape) == 4 and transformed_image.shape[1] == 3 and (max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size), f'set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.' self.reset_image() self.original_size = original_image_size self.input_size = tuple(transformed_image.shape[-2:]) input_image = self.model.preprocess(transformed_image) self.features = self.model.image_encoder(input_image) self.is_image_set = True def predict(self, point_coords: Optional[np.ndarray]=None, point_labels: Optional[np.ndarray]=None, box: Optional[np.ndarray]=None, mask_input: Optional[np.ndarray]=None, multimask_output: bool=True, return_logits: bool=False) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: if not self.is_image_set: raise RuntimeError('An image must be set with .set_image(...) before mask prediction.') (coords_torch, labels_torch, box_torch, mask_input_torch) = (None, None, None, None) if point_coords is not None: assert point_labels is not None, 'point_labels must be supplied if point_coords is supplied.' point_coords = self.transform.apply_coords(point_coords, self.original_size) coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device) labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device) (coords_torch, labels_torch) = (coords_torch[None, :, :], labels_torch[None, :]) if box is not None: box = self.transform.apply_boxes(box, self.original_size) box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device) box_torch = box_torch[None, :] if mask_input is not None: mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device) mask_input_torch = mask_input_torch[None, :, :, :] (masks, iou_predictions, low_res_masks) = self.predict_torch(coords_torch, labels_torch, box_torch, mask_input_torch, multimask_output, return_logits=return_logits) masks_np = masks[0].detach().cpu().numpy() iou_predictions_np = iou_predictions[0].detach().cpu().numpy() low_res_masks_np = low_res_masks[0].detach().cpu().numpy() return (masks_np, iou_predictions_np, low_res_masks_np) @torch.no_grad() def predict_torch(self, point_coords: Optional[torch.Tensor], point_labels: Optional[torch.Tensor], boxes: Optional[torch.Tensor]=None, mask_input: Optional[torch.Tensor]=None, multimask_output: bool=True, return_logits: bool=False) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: if not self.is_image_set: raise RuntimeError('An image must be set with .set_image(...) before mask prediction.') if point_coords is not None: points = (point_coords, point_labels) else: points = None (sparse_embeddings, dense_embeddings) = self.model.prompt_encoder(points=points, boxes=boxes, masks=mask_input) (low_res_masks, iou_predictions) = self.model.mask_decoder(image_embeddings=self.features, image_pe=self.model.prompt_encoder.get_dense_pe(), sparse_prompt_embeddings=sparse_embeddings, dense_prompt_embeddings=dense_embeddings, multimask_output=multimask_output) masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size) if not return_logits: masks = masks > self.model.mask_threshold return (masks, iou_predictions, low_res_masks) def get_image_embedding(self) -> torch.Tensor: if not self.is_image_set: raise RuntimeError('An image must be set with .set_image(...) to generate an embedding.') assert self.features is not None, 'Features must exist if an image has been set.' return self.features @property def device(self) -> torch.device: return self.model.device def reset_image(self) -> None: self.is_image_set = False self.features = None self.orig_h = None self.orig_w = None self.input_h = None self.input_w = None # File: controlnet_aux-master/src/controlnet_aux/shuffle/__init__.py import warnings import cv2 import numpy as np from PIL import Image from ..util import HWC3, img2mask, make_noise_disk, resize_image class ContentShuffleDetector: def __call__(self, input_image, h=None, w=None, f=None, detect_resolution=512, image_resolution=512, output_type='pil', **kwargs): if 'return_pil' in kwargs: warnings.warn('return_pil is deprecated. Use output_type instead.', DeprecationWarning) output_type = 'pil' if kwargs['return_pil'] else 'np' if type(output_type) is bool: warnings.warn('Passing `True` or `False` to `output_type` is deprecated and will raise an error in future versions') if output_type: output_type = 'pil' if not isinstance(input_image, np.ndarray): input_image = np.array(input_image, dtype=np.uint8) input_image = HWC3(input_image) input_image = resize_image(input_image, detect_resolution) (H, W, C) = input_image.shape if h is None: h = H if w is None: w = W if f is None: f = 256 x = make_noise_disk(h, w, 1, f) * float(W - 1) y = make_noise_disk(h, w, 1, f) * float(H - 1) flow = np.concatenate([x, y], axis=2).astype(np.float32) detected_map = cv2.remap(input_image, flow, None, cv2.INTER_LINEAR) img = resize_image(input_image, image_resolution) (H, W, C) = img.shape detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR) if output_type == 'pil': detected_map = Image.fromarray(detected_map) return detected_map class ColorShuffleDetector: def __call__(self, img): (H, W, C) = img.shape F = np.random.randint(64, 384) A = make_noise_disk(H, W, 3, F) B = make_noise_disk(H, W, 3, F) C = (A + B) / 2.0 A = (C + (A - C) * 3.0).clip(0, 1) B = (C + (B - C) * 3.0).clip(0, 1) L = img.astype(np.float32) / 255.0 Y = A * L + B * (1 - L) Y -= np.min(Y, axis=(0, 1), keepdims=True) Y /= np.maximum(np.max(Y, axis=(0, 1), keepdims=True), 1e-05) Y *= 255.0 return Y.clip(0, 255).astype(np.uint8) class GrayDetector: def __call__(self, img): eps = 1e-05 X = img.astype(np.float32) (r, g, b) = (X[:, :, 0], X[:, :, 1], X[:, :, 2]) (kr, kg, kb) = [random.random() + eps for _ in range(3)] ks = kr + kg + kb kr /= ks kg /= ks kb /= ks Y = r * kr + g * kg + b * kb Y = np.stack([Y] * 3, axis=2) return Y.clip(0, 255).astype(np.uint8) class DownSampleDetector: def __call__(self, img, level=3, k=16.0): h = img.astype(np.float32) for _ in range(level): h += np.random.normal(loc=0.0, scale=k, size=h.shape) h = cv2.pyrDown(h) for _ in range(level): h = cv2.pyrUp(h) h += np.random.normal(loc=0.0, scale=k, size=h.shape) return h.clip(0, 255).astype(np.uint8) class Image2MaskShuffleDetector: def __init__(self, resolution=(640, 512)): (self.H, self.W) = resolution def __call__(self, img): m = img2mask(img, self.H, self.W) m *= 255.0 return m.clip(0, 255).astype(np.uint8) # File: controlnet_aux-master/src/controlnet_aux/teed/Fsmish.py """""" import torch @torch.jit.script def smish(input): return input * torch.tanh(torch.log(1 + torch.sigmoid(input))) # File: controlnet_aux-master/src/controlnet_aux/teed/Xsmish.py """""" from torch import nn from .Fsmish import smish class Smish(nn.Module): def __init__(self): super().__init__() def forward(self, input): return smish(input) # File: controlnet_aux-master/src/controlnet_aux/teed/__init__.py import os import cv2 import numpy as np import torch from einops import rearrange from huggingface_hub import hf_hub_download from PIL import Image from ..util import HWC3, resize_image, safe_step from .ted import TED class TEEDdetector: def __init__(self, model): self.model = model @classmethod def from_pretrained(cls, pretrained_model_or_path, filename=None, subfolder=None): if os.path.isdir(pretrained_model_or_path): model_path = os.path.join(pretrained_model_or_path, filename) else: model_path = hf_hub_download(pretrained_model_or_path, filename, subfolder=subfolder) model = TED() model.load_state_dict(torch.load(model_path, map_location='cpu')) return cls(model) def to(self, device): self.model.to(device) return self def __call__(self, input_image, detect_resolution=512, safe_steps=2, output_type='pil'): device = next(iter(self.model.parameters())).device if not isinstance(input_image, np.ndarray): input_image = np.array(input_image, dtype=np.uint8) output_type = output_type or 'pil' else: output_type = output_type or 'np' (original_height, original_width, _) = input_image.shape input_image = HWC3(input_image) input_image = resize_image(input_image, detect_resolution) assert input_image.ndim == 3 (height, width, _) = input_image.shape with torch.no_grad(): image_teed = torch.from_numpy(input_image.copy()).float().to(device) image_teed = rearrange(image_teed, 'h w c -> 1 c h w') edges = self.model(image_teed) edges = [e.detach().cpu().numpy().astype(np.float32)[0, 0] for e in edges] edges = [cv2.resize(e, (width, height), interpolation=cv2.INTER_LINEAR) for e in edges] edges = np.stack(edges, axis=2) edge = 1 / (1 + np.exp(-np.mean(edges, axis=2).astype(np.float64))) if safe_steps != 0: edge = safe_step(edge, safe_steps) edge = (edge * 255.0).clip(0, 255).astype(np.uint8) detected_map = edge detected_map = HWC3(detected_map) detected_map = cv2.resize(detected_map, (original_width, original_height), interpolation=cv2.INTER_LINEAR) if output_type == 'pil': detected_map = Image.fromarray(detected_map) return detected_map # File: controlnet_aux-master/src/controlnet_aux/teed/ted.py import torch import torch.nn as nn import torch.nn.functional as F from .Fsmish import smish as Fsmish from .Xsmish import Smish def weight_init(m): if isinstance(m, (nn.Conv2d,)): torch.nn.init.xavier_normal_(m.weight, gain=1.0) if m.bias is not None: torch.nn.init.zeros_(m.bias) if isinstance(m, (nn.ConvTranspose2d,)): torch.nn.init.xavier_normal_(m.weight, gain=1.0) if m.bias is not None: torch.nn.init.zeros_(m.bias) class CoFusion(nn.Module): def __init__(self, in_ch, out_ch): super(CoFusion, self).__init__() self.conv1 = nn.Conv2d(in_ch, 32, kernel_size=3, stride=1, padding=1) self.conv3 = nn.Conv2d(32, out_ch, kernel_size=3, stride=1, padding=1) self.relu = nn.ReLU() self.norm_layer1 = nn.GroupNorm(4, 32) def forward(self, x): attn = self.relu(self.norm_layer1(self.conv1(x))) attn = F.softmax(self.conv3(attn), dim=1) return (x * attn).sum(1).unsqueeze(1) class CoFusion2(nn.Module): def __init__(self, in_ch, out_ch): super(CoFusion2, self).__init__() self.conv1 = nn.Conv2d(in_ch, 32, kernel_size=3, stride=1, padding=1) self.conv3 = nn.Conv2d(32, out_ch, kernel_size=3, stride=1, padding=1) self.smish = Smish() def forward(self, x): attn = self.conv1(self.smish(x)) attn = self.conv3(self.smish(attn)) return (x * attn).sum(1).unsqueeze(1) class DoubleFusion(nn.Module): def __init__(self, in_ch, out_ch): super(DoubleFusion, self).__init__() self.DWconv1 = nn.Conv2d(in_ch, in_ch * 8, kernel_size=3, stride=1, padding=1, groups=in_ch) self.PSconv1 = nn.PixelShuffle(1) self.DWconv2 = nn.Conv2d(24, 24 * 1, kernel_size=3, stride=1, padding=1, groups=24) self.AF = Smish() def forward(self, x): attn = self.PSconv1(self.DWconv1(self.AF(x))) attn2 = self.PSconv1(self.DWconv2(self.AF(attn))) return Fsmish((attn2 + attn).sum(1).unsqueeze(1)) class _DenseLayer(nn.Sequential): def __init__(self, input_features, out_features): super(_DenseLayer, self).__init__() (self.add_module('conv1', nn.Conv2d(input_features, out_features, kernel_size=3, stride=1, padding=2, bias=True)),) (self.add_module('smish1', Smish()),) self.add_module('conv2', nn.Conv2d(out_features, out_features, kernel_size=3, stride=1, bias=True)) def forward(self, x): (x1, x2) = x new_features = super(_DenseLayer, self).forward(Fsmish(x1)) return (0.5 * (new_features + x2), x2) class _DenseBlock(nn.Sequential): def __init__(self, num_layers, input_features, out_features): super(_DenseBlock, self).__init__() for i in range(num_layers): layer = _DenseLayer(input_features, out_features) self.add_module('denselayer%d' % (i + 1), layer) input_features = out_features class UpConvBlock(nn.Module): def __init__(self, in_features, up_scale): super(UpConvBlock, self).__init__() self.up_factor = 2 self.constant_features = 16 layers = self.make_deconv_layers(in_features, up_scale) assert layers is not None, layers self.features = nn.Sequential(*layers) def make_deconv_layers(self, in_features, up_scale): layers = [] all_pads = [0, 0, 1, 3, 7] for i in range(up_scale): kernel_size = 2 ** up_scale pad = all_pads[up_scale] out_features = self.compute_out_features(i, up_scale) layers.append(nn.Conv2d(in_features, out_features, 1)) layers.append(Smish()) layers.append(nn.ConvTranspose2d(out_features, out_features, kernel_size, stride=2, padding=pad)) in_features = out_features return layers def compute_out_features(self, idx, up_scale): return 1 if idx == up_scale - 1 else self.constant_features def forward(self, x): return self.features(x) class SingleConvBlock(nn.Module): def __init__(self, in_features, out_features, stride, use_ac=False): super(SingleConvBlock, self).__init__() self.use_ac = use_ac self.conv = nn.Conv2d(in_features, out_features, 1, stride=stride, bias=True) if self.use_ac: self.smish = Smish() def forward(self, x): x = self.conv(x) if self.use_ac: return self.smish(x) else: return x class DoubleConvBlock(nn.Module): def __init__(self, in_features, mid_features, out_features=None, stride=1, use_act=True): super(DoubleConvBlock, self).__init__() self.use_act = use_act if out_features is None: out_features = mid_features self.conv1 = nn.Conv2d(in_features, mid_features, 3, padding=1, stride=stride) self.conv2 = nn.Conv2d(mid_features, out_features, 3, padding=1) self.smish = Smish() def forward(self, x): x = self.conv1(x) x = self.smish(x) x = self.conv2(x) if self.use_act: x = self.smish(x) return x class TED(nn.Module): def __init__(self): super(TED, self).__init__() self.block_1 = DoubleConvBlock(3, 16, 16, stride=2) self.block_2 = DoubleConvBlock(16, 32, use_act=False) self.dblock_3 = _DenseBlock(1, 32, 48) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.side_1 = SingleConvBlock(16, 32, 2) self.pre_dense_3 = SingleConvBlock(32, 48, 1) self.up_block_1 = UpConvBlock(16, 1) self.up_block_2 = UpConvBlock(32, 1) self.up_block_3 = UpConvBlock(48, 2) self.block_cat = DoubleFusion(3, 3) self.apply(weight_init) def slice(self, tensor, slice_shape): t_shape = tensor.shape (img_h, img_w) = slice_shape if img_w != t_shape[-1] or img_h != t_shape[2]: new_tensor = F.interpolate(tensor, size=(img_h, img_w), mode='bicubic', align_corners=False) else: new_tensor = tensor return new_tensor def resize_input(self, tensor): t_shape = tensor.shape if t_shape[2] % 8 != 0 or t_shape[3] % 8 != 0: img_w = (t_shape[3] // 8 + 1) * 8 img_h = (t_shape[2] // 8 + 1) * 8 new_tensor = F.interpolate(tensor, size=(img_h, img_w), mode='bicubic', align_corners=False) else: new_tensor = tensor return new_tensor def crop_bdcn(data1, h, w, crop_h, crop_w): (_, _, h1, w1) = data1.size() assert h <= h1 and w <= w1 data = data1[:, :, crop_h:crop_h + h, crop_w:crop_w + w] return data def forward(self, x, single_test=False): assert x.ndim == 4, x.shape block_1 = self.block_1(x) block_1_side = self.side_1(block_1) block_2 = self.block_2(block_1) block_2_down = self.maxpool(block_2) block_2_add = block_2_down + block_1_side block_3_pre_dense = self.pre_dense_3(block_2_down) (block_3, _) = self.dblock_3([block_2_add, block_3_pre_dense]) out_1 = self.up_block_1(block_1) out_2 = self.up_block_2(block_2) out_3 = self.up_block_3(block_3) results = [out_1, out_2, out_3] block_cat = torch.cat(results, dim=1) block_cat = self.block_cat(block_cat) results.append(block_cat) return results if __name__ == '__main__': batch_size = 8 img_height = 352 img_width = 352 device = 'cpu' input = torch.rand(batch_size, 3, img_height, img_width).to(device) print(f'input shape: {input.shape}') model = TED().to(device) output = model(input) print(f'output shapes: {[t.shape for t in output]}') # File: controlnet_aux-master/src/controlnet_aux/util.py import os import random import cv2 import numpy as np import torch annotator_ckpts_path = os.path.join(os.path.dirname(__file__), 'ckpts') def HWC3(x): assert x.dtype == np.uint8 if x.ndim == 2: x = x[:, :, None] assert x.ndim == 3 (H, W, C) = x.shape assert C == 1 or C == 3 or C == 4 if C == 3: return x if C == 1: return np.concatenate([x, x, x], axis=2) if C == 4: color = x[:, :, 0:3].astype(np.float32) alpha = x[:, :, 3:4].astype(np.float32) / 255.0 y = color * alpha + 255.0 * (1.0 - alpha) y = y.clip(0, 255).astype(np.uint8) return y def make_noise_disk(H, W, C, F): noise = np.random.uniform(low=0, high=1, size=(H // F + 2, W // F + 2, C)) noise = cv2.resize(noise, (W + 2 * F, H + 2 * F), interpolation=cv2.INTER_CUBIC) noise = noise[F:F + H, F:F + W] noise -= np.min(noise) noise /= np.max(noise) if C == 1: noise = noise[:, :, None] return noise def nms(x, t, s): x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s) f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8) f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8) f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8) f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8) y = np.zeros_like(x) for f in [f1, f2, f3, f4]: np.putmask(y, cv2.dilate(x, kernel=f) == x, x) z = np.zeros_like(y, dtype=np.uint8) z[y > t] = 255 return z def min_max_norm(x): x -= np.min(x) x /= np.maximum(np.max(x), 1e-05) return x def safe_step(x, step=2): y = x.astype(np.float32) * float(step + 1) y = y.astype(np.int32).astype(np.float32) / float(step) return y def img2mask(img, H, W, low=10, high=90): assert img.ndim == 3 or img.ndim == 2 assert img.dtype == np.uint8 if img.ndim == 3: y = img[:, :, random.randrange(0, img.shape[2])] else: y = img y = cv2.resize(y, (W, H), interpolation=cv2.INTER_CUBIC) if random.uniform(0, 1) < 0.5: y = 255 - y return y < np.percentile(y, random.randrange(low, high)) def resize_image(input_image, resolution): (H, W, C) = input_image.shape H = float(H) W = float(W) k = float(resolution) / min(H, W) H *= k W *= k H = int(np.round(H / 64.0)) * 64 W = int(np.round(W / 64.0)) * 64 img = cv2.resize(input_image, (W, H), interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA) return img def torch_gc(): if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.ipc_collect() def ade_palette(): return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], [102, 255, 0], [92, 0, 255]] # File: controlnet_aux-master/src/controlnet_aux/zoe/__init__.py import os import cv2 import numpy as np import torch from einops import rearrange from huggingface_hub import hf_hub_download from PIL import Image from ..util import HWC3, resize_image from .zoedepth.models.zoedepth.zoedepth_v1 import ZoeDepth from .zoedepth.models.zoedepth_nk.zoedepth_nk_v1 import ZoeDepthNK from .zoedepth.utils.config import get_config class ZoeDetector: def __init__(self, model): self.model = model @classmethod def from_pretrained(cls, pretrained_model_or_path, model_type='zoedepth', filename=None, cache_dir=None, local_files_only=False): filename = filename or 'ZoeD_M12_N.pt' if os.path.isdir(pretrained_model_or_path): model_path = os.path.join(pretrained_model_or_path, filename) else: model_path = hf_hub_download(pretrained_model_or_path, filename, cache_dir=cache_dir, local_files_only=local_files_only) conf = get_config(model_type, 'infer') model_cls = ZoeDepth if model_type == 'zoedepth' else ZoeDepthNK model = model_cls.build_from_config(conf) model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu'))['model']) model.eval() return cls(model) def to(self, device): self.model.to(device) return self def __call__(self, input_image, detect_resolution=512, image_resolution=512, output_type=None, gamma_corrected=False): device = next(iter(self.model.parameters())).device if not isinstance(input_image, np.ndarray): input_image = np.array(input_image, dtype=np.uint8) output_type = output_type or 'pil' else: output_type = output_type or 'np' input_image = HWC3(input_image) input_image = resize_image(input_image, detect_resolution) assert input_image.ndim == 3 image_depth = input_image with torch.no_grad(): image_depth = torch.from_numpy(image_depth).float().to(device) image_depth = image_depth / 255.0 image_depth = rearrange(image_depth, 'h w c -> 1 c h w') depth = self.model.infer(image_depth) depth = depth[0, 0].cpu().numpy() vmin = np.percentile(depth, 2) vmax = np.percentile(depth, 85) depth -= vmin depth /= vmax - vmin depth = 1.0 - depth if gamma_corrected: depth = np.power(depth, 2.2) depth_image = (depth * 255.0).clip(0, 255).astype(np.uint8) detected_map = depth_image detected_map = HWC3(detected_map) img = resize_image(input_image, image_resolution) (H, W, C) = img.shape detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR) if output_type == 'pil': detected_map = Image.fromarray(detected_map) return detected_map # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/base_models/midas.py import os import torch import torch.nn as nn import numpy as np from torchvision.transforms import Normalize def denormalize(x): mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(x.device) std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(x.device) return x * std + mean def get_activation(name, bank): def hook(model, input, output): bank[name] = output return hook class Resize(object): def __init__(self, width, height, resize_target=True, keep_aspect_ratio=False, ensure_multiple_of=1, resize_method='lower_bound'): self.__width = width self.__height = height self.__keep_aspect_ratio = keep_aspect_ratio self.__multiple_of = ensure_multiple_of self.__resize_method = resize_method def constrain_to_multiple_of(self, x, min_val=0, max_val=None): y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int) if max_val is not None and y > max_val: y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int) if y < min_val: y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int) return y def get_size(self, width, height): scale_height = self.__height / height scale_width = self.__width / width if self.__keep_aspect_ratio: if self.__resize_method == 'lower_bound': if scale_width > scale_height: scale_height = scale_width else: scale_width = scale_height elif self.__resize_method == 'upper_bound': if scale_width < scale_height: scale_height = scale_width else: scale_width = scale_height elif self.__resize_method == 'minimal': if abs(1 - scale_width) < abs(1 - scale_height): scale_height = scale_width else: scale_width = scale_height else: raise ValueError(f'resize_method {self.__resize_method} not implemented') if self.__resize_method == 'lower_bound': new_height = self.constrain_to_multiple_of(scale_height * height, min_val=self.__height) new_width = self.constrain_to_multiple_of(scale_width * width, min_val=self.__width) elif self.__resize_method == 'upper_bound': new_height = self.constrain_to_multiple_of(scale_height * height, max_val=self.__height) new_width = self.constrain_to_multiple_of(scale_width * width, max_val=self.__width) elif self.__resize_method == 'minimal': new_height = self.constrain_to_multiple_of(scale_height * height) new_width = self.constrain_to_multiple_of(scale_width * width) else: raise ValueError(f'resize_method {self.__resize_method} not implemented') return (new_width, new_height) def __call__(self, x): (width, height) = self.get_size(*x.shape[-2:][::-1]) return nn.functional.interpolate(x, (height, width), mode='bilinear', align_corners=True) class PrepForMidas(object): def __init__(self, resize_mode='minimal', keep_aspect_ratio=True, img_size=384, do_resize=True): if isinstance(img_size, int): img_size = (img_size, img_size) (net_h, net_w) = img_size self.normalization = Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) self.resizer = Resize(net_w, net_h, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=32, resize_method=resize_mode) if do_resize else nn.Identity() def __call__(self, x): return self.normalization(self.resizer(x)) class MidasCore(nn.Module): def __init__(self, midas, trainable=False, fetch_features=True, layer_names=('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1'), freeze_bn=False, keep_aspect_ratio=True, img_size=384, **kwargs): super().__init__() self.core = midas self.output_channels = None self.core_out = {} self.trainable = trainable self.fetch_features = fetch_features self.handles = [] self.layer_names = layer_names self.set_trainable(trainable) self.set_fetch_features(fetch_features) self.prep = PrepForMidas(keep_aspect_ratio=keep_aspect_ratio, img_size=img_size, do_resize=kwargs.get('do_resize', True)) if freeze_bn: self.freeze_bn() def set_trainable(self, trainable): self.trainable = trainable if trainable: self.unfreeze() else: self.freeze() return self def set_fetch_features(self, fetch_features): self.fetch_features = fetch_features if fetch_features: if len(self.handles) == 0: self.attach_hooks(self.core) else: self.remove_hooks() return self def freeze(self): for p in self.parameters(): p.requires_grad = False self.trainable = False return self def unfreeze(self): for p in self.parameters(): p.requires_grad = True self.trainable = True return self def freeze_bn(self): for m in self.modules(): if isinstance(m, nn.BatchNorm2d): m.eval() return self def forward(self, x, denorm=False, return_rel_depth=False): with torch.no_grad(): if denorm: x = denormalize(x) x = self.prep(x) with torch.set_grad_enabled(self.trainable): rel_depth = self.core(x) if not self.fetch_features: return rel_depth out = [self.core_out[k] for k in self.layer_names] if return_rel_depth: return (rel_depth, out) return out def get_rel_pos_params(self): for (name, p) in self.core.pretrained.named_parameters(): if 'relative_position' in name: yield p def get_enc_params_except_rel_pos(self): for (name, p) in self.core.pretrained.named_parameters(): if 'relative_position' not in name: yield p def freeze_encoder(self, freeze_rel_pos=False): if freeze_rel_pos: for p in self.core.pretrained.parameters(): p.requires_grad = False else: for p in self.get_enc_params_except_rel_pos(): p.requires_grad = False return self def attach_hooks(self, midas): if len(self.handles) > 0: self.remove_hooks() if 'out_conv' in self.layer_names: self.handles.append(list(midas.scratch.output_conv.children())[3].register_forward_hook(get_activation('out_conv', self.core_out))) if 'r4' in self.layer_names: self.handles.append(midas.scratch.refinenet4.register_forward_hook(get_activation('r4', self.core_out))) if 'r3' in self.layer_names: self.handles.append(midas.scratch.refinenet3.register_forward_hook(get_activation('r3', self.core_out))) if 'r2' in self.layer_names: self.handles.append(midas.scratch.refinenet2.register_forward_hook(get_activation('r2', self.core_out))) if 'r1' in self.layer_names: self.handles.append(midas.scratch.refinenet1.register_forward_hook(get_activation('r1', self.core_out))) if 'l4_rn' in self.layer_names: self.handles.append(midas.scratch.layer4_rn.register_forward_hook(get_activation('l4_rn', self.core_out))) return self def remove_hooks(self): for h in self.handles: h.remove() return self def __del__(self): self.remove_hooks() def set_output_channels(self, model_type): self.output_channels = MIDAS_SETTINGS[model_type] @staticmethod def build(midas_model_type='DPT_BEiT_L_384', train_midas=False, use_pretrained_midas=True, fetch_features=False, freeze_bn=True, force_keep_ar=False, force_reload=False, **kwargs): if midas_model_type not in MIDAS_SETTINGS: raise ValueError(f'Invalid model type: {midas_model_type}. Must be one of {list(MIDAS_SETTINGS.keys())}') if 'img_size' in kwargs: kwargs = MidasCore.parse_img_size(kwargs) img_size = kwargs.pop('img_size', [384, 384]) midas_path = os.path.join(os.path.dirname(__file__), 'midas_repo') midas = torch.hub.load(midas_path, midas_model_type, pretrained=use_pretrained_midas, force_reload=force_reload, source='local') kwargs.update({'keep_aspect_ratio': force_keep_ar}) midas_core = MidasCore(midas, trainable=train_midas, fetch_features=fetch_features, freeze_bn=freeze_bn, img_size=img_size, **kwargs) midas_core.set_output_channels(midas_model_type) return midas_core @staticmethod def build_from_config(config): return MidasCore.build(**config) @staticmethod def parse_img_size(config): assert 'img_size' in config if isinstance(config['img_size'], str): assert ',' in config['img_size'], 'img_size should be a string with comma separated img_size=H,W' config['img_size'] = list(map(int, config['img_size'].split(','))) assert len(config['img_size']) == 2, 'img_size should be a string with comma separated img_size=H,W' elif isinstance(config['img_size'], int): config['img_size'] = [config['img_size'], config['img_size']] else: assert isinstance(config['img_size'], list) and len(config['img_size']) == 2, 'img_size should be a list of H,W' return config nchannels2models = {tuple([256] * 5): ['DPT_BEiT_L_384', 'DPT_BEiT_L_512', 'DPT_BEiT_B_384', 'DPT_SwinV2_L_384', 'DPT_SwinV2_B_384', 'DPT_SwinV2_T_256', 'DPT_Large', 'DPT_Hybrid'], (512, 256, 128, 64, 64): ['MiDaS_small']} MIDAS_SETTINGS = {m: k for (k, v) in nchannels2models.items() for m in v} # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/beit.py import timm import torch import types import numpy as np import torch.nn.functional as F from .utils import forward_adapted_unflatten, make_backbone_default from timm.models.beit import gen_relative_position_index from torch.utils.checkpoint import checkpoint from typing import Optional def forward_beit(pretrained, x): return forward_adapted_unflatten(pretrained, x, 'forward_features') def patch_embed_forward(self, x): x = self.proj(x) if self.flatten: x = x.flatten(2).transpose(1, 2) x = self.norm(x) return x def _get_rel_pos_bias(self, window_size): old_height = 2 * self.window_size[0] - 1 old_width = 2 * self.window_size[1] - 1 new_height = 2 * window_size[0] - 1 new_width = 2 * window_size[1] - 1 old_relative_position_bias_table = self.relative_position_bias_table old_num_relative_distance = self.num_relative_distance new_num_relative_distance = new_height * new_width + 3 old_sub_table = old_relative_position_bias_table[:old_num_relative_distance - 3] old_sub_table = old_sub_table.reshape(1, old_width, old_height, -1).permute(0, 3, 1, 2) new_sub_table = F.interpolate(old_sub_table, size=(new_height, new_width), mode='bilinear') new_sub_table = new_sub_table.permute(0, 2, 3, 1).reshape(new_num_relative_distance - 3, -1) new_relative_position_bias_table = torch.cat([new_sub_table, old_relative_position_bias_table[old_num_relative_distance - 3:]]) key = str(window_size[1]) + ',' + str(window_size[0]) if key not in self.relative_position_indices.keys(): self.relative_position_indices[key] = gen_relative_position_index(window_size) relative_position_bias = new_relative_position_bias_table[self.relative_position_indices[key].view(-1)].view(window_size[0] * window_size[1] + 1, window_size[0] * window_size[1] + 1, -1) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() return relative_position_bias.unsqueeze(0) def attention_forward(self, x, resolution, shared_rel_pos_bias: Optional[torch.Tensor]=None): (B, N, C) = x.shape qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.q_bias is not None else None qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) (q, k, v) = qkv.unbind(0) q = q * self.scale attn = q @ k.transpose(-2, -1) if self.relative_position_bias_table is not None: window_size = tuple(np.array(resolution) // 16) attn = attn + self._get_rel_pos_bias(window_size) if shared_rel_pos_bias is not None: attn = attn + shared_rel_pos_bias attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, -1) x = self.proj(x) x = self.proj_drop(x) return x def block_forward(self, x, resolution, shared_rel_pos_bias: Optional[torch.Tensor]=None): if self.gamma_1 is None: x = x + self.drop_path(self.attn(self.norm1(x), resolution, shared_rel_pos_bias=shared_rel_pos_bias)) x = x + self.drop_path(self.mlp(self.norm2(x))) else: x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), resolution, shared_rel_pos_bias=shared_rel_pos_bias)) x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) return x def beit_forward_features(self, x): resolution = x.shape[2:] x = self.patch_embed(x) x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None for blk in self.blocks: if self.grad_checkpointing and (not torch.jit.is_scripting()): x = checkpoint(blk, x, shared_rel_pos_bias=rel_pos_bias) else: x = blk(x, resolution, shared_rel_pos_bias=rel_pos_bias) x = self.norm(x) return x def _make_beit_backbone(model, features=[96, 192, 384, 768], size=[384, 384], hooks=[0, 4, 8, 11], vit_features=768, use_readout='ignore', start_index=1, start_index_readout=1): backbone = make_backbone_default(model, features, size, hooks, vit_features, use_readout, start_index, start_index_readout) backbone.model.patch_embed.forward = types.MethodType(patch_embed_forward, backbone.model.patch_embed) backbone.model.forward_features = types.MethodType(beit_forward_features, backbone.model) for block in backbone.model.blocks: attn = block.attn attn._get_rel_pos_bias = types.MethodType(_get_rel_pos_bias, attn) attn.forward = types.MethodType(attention_forward, attn) attn.relative_position_indices = {} block.forward = types.MethodType(block_forward, block) return backbone def _make_pretrained_beitl16_512(pretrained, use_readout='ignore', hooks=None): model = timm.create_model('beit_large_patch16_512', pretrained=pretrained) hooks = [5, 11, 17, 23] if hooks is None else hooks features = [256, 512, 1024, 1024] return _make_beit_backbone(model, features=features, size=[512, 512], hooks=hooks, vit_features=1024, use_readout=use_readout) def _make_pretrained_beitl16_384(pretrained, use_readout='ignore', hooks=None): model = timm.create_model('beit_large_patch16_384', pretrained=pretrained) hooks = [5, 11, 17, 23] if hooks is None else hooks return _make_beit_backbone(model, features=[256, 512, 1024, 1024], hooks=hooks, vit_features=1024, use_readout=use_readout) def _make_pretrained_beitb16_384(pretrained, use_readout='ignore', hooks=None): model = timm.create_model('beit_base_patch16_384', pretrained=pretrained) hooks = [2, 5, 8, 11] if hooks is None else hooks return _make_beit_backbone(model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout) # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/levit.py import timm import torch import torch.nn as nn import numpy as np from .utils import activations, get_activation, Transpose def forward_levit(pretrained, x): pretrained.model.forward_features(x) layer_1 = pretrained.activations['1'] layer_2 = pretrained.activations['2'] layer_3 = pretrained.activations['3'] layer_1 = pretrained.act_postprocess1(layer_1) layer_2 = pretrained.act_postprocess2(layer_2) layer_3 = pretrained.act_postprocess3(layer_3) return (layer_1, layer_2, layer_3) def _make_levit_backbone(model, hooks=[3, 11, 21], patch_grid=[14, 14]): pretrained = nn.Module() pretrained.model = model pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation('1')) pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation('2')) pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation('3')) pretrained.activations = activations patch_grid_size = np.array(patch_grid, dtype=int) pretrained.act_postprocess1 = nn.Sequential(Transpose(1, 2), nn.Unflatten(2, torch.Size(patch_grid_size.tolist()))) pretrained.act_postprocess2 = nn.Sequential(Transpose(1, 2), nn.Unflatten(2, torch.Size(np.ceil(patch_grid_size / 2).astype(int).tolist()))) pretrained.act_postprocess3 = nn.Sequential(Transpose(1, 2), nn.Unflatten(2, torch.Size(np.ceil(patch_grid_size / 4).astype(int).tolist()))) return pretrained class ConvTransposeNorm(nn.Sequential): def __init__(self, in_chs, out_chs, kernel_size=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1): super().__init__() self.add_module('c', nn.ConvTranspose2d(in_chs, out_chs, kernel_size, stride, pad, dilation, groups, bias=False)) self.add_module('bn', nn.BatchNorm2d(out_chs)) nn.init.constant_(self.bn.weight, bn_weight_init) @torch.no_grad() def fuse(self): (c, bn) = self._modules.values() w = bn.weight / (bn.running_var + bn.eps) ** 0.5 w = c.weight * w[:, None, None, None] b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 m = nn.ConvTranspose2d(w.size(1), w.size(0), w.shape[2:], stride=self.c.stride, padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups) m.weight.data.copy_(w) m.bias.data.copy_(b) return m def stem_b4_transpose(in_chs, out_chs, activation): return nn.Sequential(ConvTransposeNorm(in_chs, out_chs, 3, 2, 1), activation(), ConvTransposeNorm(out_chs, out_chs // 2, 3, 2, 1), activation()) def _make_pretrained_levit_384(pretrained, hooks=None): model = timm.create_model('levit_384', pretrained=pretrained) hooks = [3, 11, 21] if hooks == None else hooks return _make_levit_backbone(model, hooks=hooks) # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/next_vit.py import timm import torch.nn as nn from pathlib import Path from .utils import activations, forward_default, get_activation from ..external.next_vit.classification.nextvit import * def forward_next_vit(pretrained, x): return forward_default(pretrained, x, 'forward') def _make_next_vit_backbone(model, hooks=[2, 6, 36, 39]): pretrained = nn.Module() pretrained.model = model pretrained.model.features[hooks[0]].register_forward_hook(get_activation('1')) pretrained.model.features[hooks[1]].register_forward_hook(get_activation('2')) pretrained.model.features[hooks[2]].register_forward_hook(get_activation('3')) pretrained.model.features[hooks[3]].register_forward_hook(get_activation('4')) pretrained.activations = activations return pretrained def _make_pretrained_next_vit_large_6m(hooks=None): model = timm.create_model('nextvit_large') hooks = [2, 6, 36, 39] if hooks == None else hooks return _make_next_vit_backbone(model, hooks=hooks) # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/swin2.py import timm from .swin_common import _make_swin_backbone def _make_pretrained_swin2l24_384(pretrained, hooks=None): model = timm.create_model('swinv2_large_window12to24_192to384_22kft1k', pretrained=pretrained) hooks = [1, 1, 17, 1] if hooks == None else hooks return _make_swin_backbone(model, hooks=hooks) def _make_pretrained_swin2b24_384(pretrained, hooks=None): model = timm.create_model('swinv2_base_window12to24_192to384_22kft1k', pretrained=pretrained) hooks = [1, 1, 17, 1] if hooks == None else hooks return _make_swin_backbone(model, hooks=hooks) def _make_pretrained_swin2t16_256(pretrained, hooks=None): model = timm.create_model('swinv2_tiny_window16_256', pretrained=pretrained) hooks = [1, 1, 5, 1] if hooks == None else hooks return _make_swin_backbone(model, hooks=hooks, patch_grid=[64, 64]) # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/swin_common.py import torch import torch.nn as nn import numpy as np from .utils import activations, forward_default, get_activation, Transpose def forward_swin(pretrained, x): return forward_default(pretrained, x) def _make_swin_backbone(model, hooks=[1, 1, 17, 1], patch_grid=[96, 96]): pretrained = nn.Module() pretrained.model = model pretrained.model.layers[0].blocks[hooks[0]].register_forward_hook(get_activation('1')) pretrained.model.layers[1].blocks[hooks[1]].register_forward_hook(get_activation('2')) pretrained.model.layers[2].blocks[hooks[2]].register_forward_hook(get_activation('3')) pretrained.model.layers[3].blocks[hooks[3]].register_forward_hook(get_activation('4')) pretrained.activations = activations if hasattr(model, 'patch_grid'): used_patch_grid = model.patch_grid else: used_patch_grid = patch_grid patch_grid_size = np.array(used_patch_grid, dtype=int) pretrained.act_postprocess1 = nn.Sequential(Transpose(1, 2), nn.Unflatten(2, torch.Size(patch_grid_size.tolist()))) pretrained.act_postprocess2 = nn.Sequential(Transpose(1, 2), nn.Unflatten(2, torch.Size((patch_grid_size // 2).tolist()))) pretrained.act_postprocess3 = nn.Sequential(Transpose(1, 2), nn.Unflatten(2, torch.Size((patch_grid_size // 4).tolist()))) pretrained.act_postprocess4 = nn.Sequential(Transpose(1, 2), nn.Unflatten(2, torch.Size((patch_grid_size // 8).tolist()))) return pretrained # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/utils.py import torch import torch.nn as nn class Slice(nn.Module): def __init__(self, start_index=1): super(Slice, self).__init__() self.start_index = start_index def forward(self, x): return x[:, self.start_index:] class AddReadout(nn.Module): def __init__(self, start_index=1): super(AddReadout, self).__init__() self.start_index = start_index def forward(self, x): if self.start_index == 2: readout = (x[:, 0] + x[:, 1]) / 2 else: readout = x[:, 0] return x[:, self.start_index:] + readout.unsqueeze(1) class ProjectReadout(nn.Module): def __init__(self, in_features, start_index=1): super(ProjectReadout, self).__init__() self.start_index = start_index self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU()) def forward(self, x): readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index:]) features = torch.cat((x[:, self.start_index:], readout), -1) return self.project(features) class Transpose(nn.Module): def __init__(self, dim0, dim1): super(Transpose, self).__init__() self.dim0 = dim0 self.dim1 = dim1 def forward(self, x): x = x.transpose(self.dim0, self.dim1) return x activations = {} def get_activation(name): def hook(model, input, output): activations[name] = output return hook def forward_default(pretrained, x, function_name='forward_features'): exec(f'pretrained.model.{function_name}(x)') layer_1 = pretrained.activations['1'] layer_2 = pretrained.activations['2'] layer_3 = pretrained.activations['3'] layer_4 = pretrained.activations['4'] if hasattr(pretrained, 'act_postprocess1'): layer_1 = pretrained.act_postprocess1(layer_1) if hasattr(pretrained, 'act_postprocess2'): layer_2 = pretrained.act_postprocess2(layer_2) if hasattr(pretrained, 'act_postprocess3'): layer_3 = pretrained.act_postprocess3(layer_3) if hasattr(pretrained, 'act_postprocess4'): layer_4 = pretrained.act_postprocess4(layer_4) return (layer_1, layer_2, layer_3, layer_4) def forward_adapted_unflatten(pretrained, x, function_name='forward_features'): (b, c, h, w) = x.shape exec(f'glob = pretrained.model.{function_name}(x)') layer_1 = pretrained.activations['1'] layer_2 = pretrained.activations['2'] layer_3 = pretrained.activations['3'] layer_4 = pretrained.activations['4'] layer_1 = pretrained.act_postprocess1[0:2](layer_1) layer_2 = pretrained.act_postprocess2[0:2](layer_2) layer_3 = pretrained.act_postprocess3[0:2](layer_3) layer_4 = pretrained.act_postprocess4[0:2](layer_4) unflatten = nn.Sequential(nn.Unflatten(2, torch.Size([h // pretrained.model.patch_size[1], w // pretrained.model.patch_size[0]]))) if layer_1.ndim == 3: layer_1 = unflatten(layer_1) if layer_2.ndim == 3: layer_2 = unflatten(layer_2) if layer_3.ndim == 3: layer_3 = unflatten(layer_3) if layer_4.ndim == 3: layer_4 = unflatten(layer_4) layer_1 = pretrained.act_postprocess1[3:len(pretrained.act_postprocess1)](layer_1) layer_2 = pretrained.act_postprocess2[3:len(pretrained.act_postprocess2)](layer_2) layer_3 = pretrained.act_postprocess3[3:len(pretrained.act_postprocess3)](layer_3) layer_4 = pretrained.act_postprocess4[3:len(pretrained.act_postprocess4)](layer_4) return (layer_1, layer_2, layer_3, layer_4) def get_readout_oper(vit_features, features, use_readout, start_index=1): if use_readout == 'ignore': readout_oper = [Slice(start_index)] * len(features) elif use_readout == 'add': readout_oper = [AddReadout(start_index)] * len(features) elif use_readout == 'project': readout_oper = [ProjectReadout(vit_features, start_index) for out_feat in features] else: assert False, "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'" return readout_oper def make_backbone_default(model, features=[96, 192, 384, 768], size=[384, 384], hooks=[2, 5, 8, 11], vit_features=768, use_readout='ignore', start_index=1, start_index_readout=1): pretrained = nn.Module() pretrained.model = model pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation('1')) pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation('2')) pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation('3')) pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation('4')) pretrained.activations = activations readout_oper = get_readout_oper(vit_features, features, use_readout, start_index_readout) pretrained.act_postprocess1 = nn.Sequential(readout_oper[0], Transpose(1, 2), nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), nn.Conv2d(in_channels=vit_features, out_channels=features[0], kernel_size=1, stride=1, padding=0), nn.ConvTranspose2d(in_channels=features[0], out_channels=features[0], kernel_size=4, stride=4, padding=0, bias=True, dilation=1, groups=1)) pretrained.act_postprocess2 = nn.Sequential(readout_oper[1], Transpose(1, 2), nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), nn.Conv2d(in_channels=vit_features, out_channels=features[1], kernel_size=1, stride=1, padding=0), nn.ConvTranspose2d(in_channels=features[1], out_channels=features[1], kernel_size=2, stride=2, padding=0, bias=True, dilation=1, groups=1)) pretrained.act_postprocess3 = nn.Sequential(readout_oper[2], Transpose(1, 2), nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), nn.Conv2d(in_channels=vit_features, out_channels=features[2], kernel_size=1, stride=1, padding=0)) pretrained.act_postprocess4 = nn.Sequential(readout_oper[3], Transpose(1, 2), nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), nn.Conv2d(in_channels=vit_features, out_channels=features[3], kernel_size=1, stride=1, padding=0), nn.Conv2d(in_channels=features[3], out_channels=features[3], kernel_size=3, stride=2, padding=1)) pretrained.model.start_index = start_index pretrained.model.patch_size = [16, 16] return pretrained # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/vit.py import torch import torch.nn as nn import timm import types import math import torch.nn.functional as F from .utils import activations, forward_adapted_unflatten, get_activation, get_readout_oper, make_backbone_default, Transpose def forward_vit(pretrained, x): return forward_adapted_unflatten(pretrained, x, 'forward_flex') def _resize_pos_embed(self, posemb, gs_h, gs_w): (posemb_tok, posemb_grid) = (posemb[:, :self.start_index], posemb[0, self.start_index:]) gs_old = int(math.sqrt(len(posemb_grid))) posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode='bilinear') posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1) posemb = torch.cat([posemb_tok, posemb_grid], dim=1) return posemb def forward_flex(self, x): (b, c, h, w) = x.shape pos_embed = self._resize_pos_embed(self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]) B = x.shape[0] if hasattr(self.patch_embed, 'backbone'): x = self.patch_embed.backbone(x) if isinstance(x, (list, tuple)): x = x[-1] x = self.patch_embed.proj(x).flatten(2).transpose(1, 2) if getattr(self, 'dist_token', None) is not None: cls_tokens = self.cls_token.expand(B, -1, -1) dist_token = self.dist_token.expand(B, -1, -1) x = torch.cat((cls_tokens, dist_token, x), dim=1) else: if self.no_embed_class: x = x + pos_embed cls_tokens = self.cls_token.expand(B, -1, -1) x = torch.cat((cls_tokens, x), dim=1) if not self.no_embed_class: x = x + pos_embed x = self.pos_drop(x) for blk in self.blocks: x = blk(x) x = self.norm(x) return x def _make_vit_b16_backbone(model, features=[96, 192, 384, 768], size=[384, 384], hooks=[2, 5, 8, 11], vit_features=768, use_readout='ignore', start_index=1, start_index_readout=1): pretrained = make_backbone_default(model, features, size, hooks, vit_features, use_readout, start_index, start_index_readout) pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model) pretrained.model._resize_pos_embed = types.MethodType(_resize_pos_embed, pretrained.model) return pretrained def _make_pretrained_vitl16_384(pretrained, use_readout='ignore', hooks=None): model = timm.create_model('vit_large_patch16_384', pretrained=pretrained) hooks = [5, 11, 17, 23] if hooks == None else hooks return _make_vit_b16_backbone(model, features=[256, 512, 1024, 1024], hooks=hooks, vit_features=1024, use_readout=use_readout) def _make_pretrained_vitb16_384(pretrained, use_readout='ignore', hooks=None): model = timm.create_model('vit_base_patch16_384', pretrained=pretrained) hooks = [2, 5, 8, 11] if hooks == None else hooks return _make_vit_b16_backbone(model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout) def _make_vit_b_rn50_backbone(model, features=[256, 512, 768, 768], size=[384, 384], hooks=[0, 1, 8, 11], vit_features=768, patch_size=[16, 16], number_stages=2, use_vit_only=False, use_readout='ignore', start_index=1): pretrained = nn.Module() pretrained.model = model used_number_stages = 0 if use_vit_only else number_stages for s in range(used_number_stages): pretrained.model.patch_embed.backbone.stages[s].register_forward_hook(get_activation(str(s + 1))) for s in range(used_number_stages, 4): pretrained.model.blocks[hooks[s]].register_forward_hook(get_activation(str(s + 1))) pretrained.activations = activations readout_oper = get_readout_oper(vit_features, features, use_readout, start_index) for s in range(used_number_stages): value = nn.Sequential(nn.Identity(), nn.Identity(), nn.Identity()) exec(f'pretrained.act_postprocess{s + 1}=value') for s in range(used_number_stages, 4): if s < number_stages: final_layer = nn.ConvTranspose2d(in_channels=features[s], out_channels=features[s], kernel_size=4 // 2 ** s, stride=4 // 2 ** s, padding=0, bias=True, dilation=1, groups=1) elif s > number_stages: final_layer = nn.Conv2d(in_channels=features[3], out_channels=features[3], kernel_size=3, stride=2, padding=1) else: final_layer = None layers = [readout_oper[s], Transpose(1, 2), nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])), nn.Conv2d(in_channels=vit_features, out_channels=features[s], kernel_size=1, stride=1, padding=0)] if final_layer is not None: layers.append(final_layer) value = nn.Sequential(*layers) exec(f'pretrained.act_postprocess{s + 1}=value') pretrained.model.start_index = start_index pretrained.model.patch_size = patch_size pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model) pretrained.model._resize_pos_embed = types.MethodType(_resize_pos_embed, pretrained.model) return pretrained def _make_pretrained_vitb_rn50_384(pretrained, use_readout='ignore', hooks=None, use_vit_only=False): model = timm.create_model('vit_base_resnet50_384', pretrained=pretrained) hooks = [0, 1, 8, 11] if hooks == None else hooks return _make_vit_b_rn50_backbone(model, features=[256, 512, 768, 768], size=[384, 384], hooks=hooks, use_vit_only=use_vit_only, use_readout=use_readout) # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/base_models/midas_repo/midas/base_model.py import torch class BaseModel(torch.nn.Module): def load(self, path): parameters = torch.load(path, map_location=torch.device('cpu')) if 'optimizer' in parameters: parameters = parameters['model'] self.load_state_dict(parameters) # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/base_models/midas_repo/midas/blocks.py import torch import torch.nn as nn from .backbones.beit import _make_pretrained_beitl16_512, _make_pretrained_beitl16_384, _make_pretrained_beitb16_384, forward_beit from .backbones.swin_common import forward_swin from .backbones.swin2 import _make_pretrained_swin2l24_384, _make_pretrained_swin2b24_384, _make_pretrained_swin2t16_256 from .backbones.swin import _make_pretrained_swinl12_384 from .backbones.levit import _make_pretrained_levit_384, forward_levit from .backbones.vit import _make_pretrained_vitb_rn50_384, _make_pretrained_vitl16_384, _make_pretrained_vitb16_384, forward_vit def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout='ignore', in_features=[96, 256, 512, 1024]): if backbone == 'beitl16_512': pretrained = _make_pretrained_beitl16_512(use_pretrained, hooks=hooks, use_readout=use_readout) scratch = _make_scratch([256, 512, 1024, 1024], features, groups=groups, expand=expand) elif backbone == 'beitl16_384': pretrained = _make_pretrained_beitl16_384(use_pretrained, hooks=hooks, use_readout=use_readout) scratch = _make_scratch([256, 512, 1024, 1024], features, groups=groups, expand=expand) elif backbone == 'beitb16_384': pretrained = _make_pretrained_beitb16_384(use_pretrained, hooks=hooks, use_readout=use_readout) scratch = _make_scratch([96, 192, 384, 768], features, groups=groups, expand=expand) elif backbone == 'swin2l24_384': pretrained = _make_pretrained_swin2l24_384(use_pretrained, hooks=hooks) scratch = _make_scratch([192, 384, 768, 1536], features, groups=groups, expand=expand) elif backbone == 'swin2b24_384': pretrained = _make_pretrained_swin2b24_384(use_pretrained, hooks=hooks) scratch = _make_scratch([128, 256, 512, 1024], features, groups=groups, expand=expand) elif backbone == 'swin2t16_256': pretrained = _make_pretrained_swin2t16_256(use_pretrained, hooks=hooks) scratch = _make_scratch([96, 192, 384, 768], features, groups=groups, expand=expand) elif backbone == 'swinl12_384': pretrained = _make_pretrained_swinl12_384(use_pretrained, hooks=hooks) scratch = _make_scratch([192, 384, 768, 1536], features, groups=groups, expand=expand) elif backbone == 'next_vit_large_6m': from .backbones.next_vit import _make_pretrained_next_vit_large_6m pretrained = _make_pretrained_next_vit_large_6m(hooks=hooks) scratch = _make_scratch(in_features, features, groups=groups, expand=expand) elif backbone == 'levit_384': pretrained = _make_pretrained_levit_384(use_pretrained, hooks=hooks) scratch = _make_scratch([384, 512, 768], features, groups=groups, expand=expand) elif backbone == 'vitl16_384': pretrained = _make_pretrained_vitl16_384(use_pretrained, hooks=hooks, use_readout=use_readout) scratch = _make_scratch([256, 512, 1024, 1024], features, groups=groups, expand=expand) elif backbone == 'vitb_rn50_384': pretrained = _make_pretrained_vitb_rn50_384(use_pretrained, hooks=hooks, use_vit_only=use_vit_only, use_readout=use_readout) scratch = _make_scratch([256, 512, 768, 768], features, groups=groups, expand=expand) elif backbone == 'vitb16_384': pretrained = _make_pretrained_vitb16_384(use_pretrained, hooks=hooks, use_readout=use_readout) scratch = _make_scratch([96, 192, 384, 768], features, groups=groups, expand=expand) elif backbone == 'resnext101_wsl': pretrained = _make_pretrained_resnext101_wsl(use_pretrained) scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand) elif backbone == 'efficientnet_lite3': pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable) scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand) else: print(f"Backbone '{backbone}' not implemented") assert False return (pretrained, scratch) def _make_scratch(in_shape, out_shape, groups=1, expand=False): scratch = nn.Module() out_shape1 = out_shape out_shape2 = out_shape out_shape3 = out_shape if len(in_shape) >= 4: out_shape4 = out_shape if expand: out_shape1 = out_shape out_shape2 = out_shape * 2 out_shape3 = out_shape * 4 if len(in_shape) >= 4: out_shape4 = out_shape * 8 scratch.layer1_rn = nn.Conv2d(in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups) scratch.layer2_rn = nn.Conv2d(in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups) scratch.layer3_rn = nn.Conv2d(in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups) if len(in_shape) >= 4: scratch.layer4_rn = nn.Conv2d(in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups) return scratch def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False): efficientnet = torch.hub.load('rwightman/gen-efficientnet-pytorch', 'tf_efficientnet_lite3', pretrained=use_pretrained, exportable=exportable) return _make_efficientnet_backbone(efficientnet) def _make_efficientnet_backbone(effnet): pretrained = nn.Module() pretrained.layer1 = nn.Sequential(effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2]) pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3]) pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5]) pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9]) return pretrained def _make_resnet_backbone(resnet): pretrained = nn.Module() pretrained.layer1 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1) pretrained.layer2 = resnet.layer2 pretrained.layer3 = resnet.layer3 pretrained.layer4 = resnet.layer4 return pretrained def _make_pretrained_resnext101_wsl(use_pretrained): resnet = torch.hub.load('facebookresearch/WSL-Images', 'resnext101_32x8d_wsl') return _make_resnet_backbone(resnet) class Interpolate(nn.Module): def __init__(self, scale_factor, mode, align_corners=False): super(Interpolate, self).__init__() self.interp = nn.functional.interpolate self.scale_factor = scale_factor self.mode = mode self.align_corners = align_corners def forward(self, x): x = self.interp(x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners) return x class ResidualConvUnit(nn.Module): def __init__(self, features): super().__init__() self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True) self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True) self.relu = nn.ReLU(inplace=True) def forward(self, x): out = self.relu(x) out = self.conv1(out) out = self.relu(out) out = self.conv2(out) return out + x class FeatureFusionBlock(nn.Module): def __init__(self, features): super(FeatureFusionBlock, self).__init__() self.resConfUnit1 = ResidualConvUnit(features) self.resConfUnit2 = ResidualConvUnit(features) def forward(self, *xs): output = xs[0] if len(xs) == 2: output += self.resConfUnit1(xs[1]) output = self.resConfUnit2(output) output = nn.functional.interpolate(output, scale_factor=2, mode='bilinear', align_corners=True) return output class ResidualConvUnit_custom(nn.Module): def __init__(self, features, activation, bn): super().__init__() self.bn = bn self.groups = 1 self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups) self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups) if self.bn == True: self.bn1 = nn.BatchNorm2d(features) self.bn2 = nn.BatchNorm2d(features) self.activation = activation self.skip_add = nn.quantized.FloatFunctional() def forward(self, x): out = self.activation(x) out = self.conv1(out) if self.bn == True: out = self.bn1(out) out = self.activation(out) out = self.conv2(out) if self.bn == True: out = self.bn2(out) if self.groups > 1: out = self.conv_merge(out) return self.skip_add.add(out, x) class FeatureFusionBlock_custom(nn.Module): def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None): super(FeatureFusionBlock_custom, self).__init__() self.deconv = deconv self.align_corners = align_corners self.groups = 1 self.expand = expand out_features = features if self.expand == True: out_features = features // 2 self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1) self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn) self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn) self.skip_add = nn.quantized.FloatFunctional() self.size = size def forward(self, *xs, size=None): output = xs[0] if len(xs) == 2: res = self.resConfUnit1(xs[1]) output = self.skip_add.add(output, res) output = self.resConfUnit2(output) if size is None and self.size is None: modifier = {'scale_factor': 2} elif size is None: modifier = {'size': self.size} else: modifier = {'size': size} output = nn.functional.interpolate(output, **modifier, mode='bilinear', align_corners=self.align_corners) output = self.out_conv(output) return output # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/base_models/midas_repo/midas/dpt_depth.py import torch import torch.nn as nn from .base_model import BaseModel from .blocks import FeatureFusionBlock_custom, Interpolate, _make_encoder, forward_beit, forward_swin, forward_levit, forward_vit from .backbones.levit import stem_b4_transpose from timm.models.layers import get_act_layer def _make_fusion_block(features, use_bn, size=None): return FeatureFusionBlock_custom(features, nn.ReLU(False), deconv=False, bn=use_bn, expand=False, align_corners=True, size=size) class DPT(BaseModel): def __init__(self, head, features=256, backbone='vitb_rn50_384', readout='project', channels_last=False, use_bn=False, **kwargs): super(DPT, self).__init__() self.channels_last = channels_last hooks = {'beitl16_512': [5, 11, 17, 23], 'beitl16_384': [5, 11, 17, 23], 'beitb16_384': [2, 5, 8, 11], 'swin2l24_384': [1, 1, 17, 1], 'swin2b24_384': [1, 1, 17, 1], 'swin2t16_256': [1, 1, 5, 1], 'swinl12_384': [1, 1, 17, 1], 'next_vit_large_6m': [2, 6, 36, 39], 'levit_384': [3, 11, 21], 'vitb_rn50_384': [0, 1, 8, 11], 'vitb16_384': [2, 5, 8, 11], 'vitl16_384': [5, 11, 17, 23]}[backbone] if 'next_vit' in backbone: in_features = {'next_vit_large_6m': [96, 256, 512, 1024]}[backbone] else: in_features = None (self.pretrained, self.scratch) = _make_encoder(backbone, features, False, groups=1, expand=False, exportable=False, hooks=hooks, use_readout=readout, in_features=in_features) self.number_layers = len(hooks) if hooks is not None else 4 size_refinenet3 = None self.scratch.stem_transpose = None if 'beit' in backbone: self.forward_transformer = forward_beit elif 'swin' in backbone: self.forward_transformer = forward_swin elif 'next_vit' in backbone: from .backbones.next_vit import forward_next_vit self.forward_transformer = forward_next_vit elif 'levit' in backbone: self.forward_transformer = forward_levit size_refinenet3 = 7 self.scratch.stem_transpose = stem_b4_transpose(256, 128, get_act_layer('hard_swish')) else: self.forward_transformer = forward_vit self.scratch.refinenet1 = _make_fusion_block(features, use_bn) self.scratch.refinenet2 = _make_fusion_block(features, use_bn) self.scratch.refinenet3 = _make_fusion_block(features, use_bn, size_refinenet3) if self.number_layers >= 4: self.scratch.refinenet4 = _make_fusion_block(features, use_bn) self.scratch.output_conv = head def forward(self, x): if self.channels_last == True: x.contiguous(memory_format=torch.channels_last) layers = self.forward_transformer(self.pretrained, x) if self.number_layers == 3: (layer_1, layer_2, layer_3) = layers else: (layer_1, layer_2, layer_3, layer_4) = layers layer_1_rn = self.scratch.layer1_rn(layer_1) layer_2_rn = self.scratch.layer2_rn(layer_2) layer_3_rn = self.scratch.layer3_rn(layer_3) if self.number_layers >= 4: layer_4_rn = self.scratch.layer4_rn(layer_4) if self.number_layers == 3: path_3 = self.scratch.refinenet3(layer_3_rn, size=layer_2_rn.shape[2:]) else: path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:]) path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:]) path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:]) path_1 = self.scratch.refinenet1(path_2, layer_1_rn) if self.scratch.stem_transpose is not None: path_1 = self.scratch.stem_transpose(path_1) out = self.scratch.output_conv(path_1) return out class DPTDepthModel(DPT): def __init__(self, path=None, non_negative=True, **kwargs): features = kwargs['features'] if 'features' in kwargs else 256 head_features_1 = kwargs['head_features_1'] if 'head_features_1' in kwargs else features head_features_2 = kwargs['head_features_2'] if 'head_features_2' in kwargs else 32 kwargs.pop('head_features_1', None) kwargs.pop('head_features_2', None) head = nn.Sequential(nn.Conv2d(head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1), Interpolate(scale_factor=2, mode='bilinear', align_corners=True), nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1), nn.ReLU(True), nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0), nn.ReLU(True) if non_negative else nn.Identity(), nn.Identity()) super().__init__(head, **kwargs) if path is not None: self.load(path) def forward(self, x): return super().forward(x).squeeze(dim=1) # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/base_models/midas_repo/midas/midas_net.py """""" import torch import torch.nn as nn from .base_model import BaseModel from .blocks import FeatureFusionBlock, Interpolate, _make_encoder class MidasNet(BaseModel): def __init__(self, path=None, features=256, non_negative=True): print('Loading weights: ', path) super(MidasNet, self).__init__() use_pretrained = False if path is None else True (self.pretrained, self.scratch) = _make_encoder(backbone='resnext101_wsl', features=features, use_pretrained=use_pretrained) self.scratch.refinenet4 = FeatureFusionBlock(features) self.scratch.refinenet3 = FeatureFusionBlock(features) self.scratch.refinenet2 = FeatureFusionBlock(features) self.scratch.refinenet1 = FeatureFusionBlock(features) self.scratch.output_conv = nn.Sequential(nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1), Interpolate(scale_factor=2, mode='bilinear'), nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1), nn.ReLU(True), nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), nn.ReLU(True) if non_negative else nn.Identity()) if path: self.load(path) def forward(self, x): layer_1 = self.pretrained.layer1(x) layer_2 = self.pretrained.layer2(layer_1) layer_3 = self.pretrained.layer3(layer_2) layer_4 = self.pretrained.layer4(layer_3) layer_1_rn = self.scratch.layer1_rn(layer_1) layer_2_rn = self.scratch.layer2_rn(layer_2) layer_3_rn = self.scratch.layer3_rn(layer_3) layer_4_rn = self.scratch.layer4_rn(layer_4) path_4 = self.scratch.refinenet4(layer_4_rn) path_3 = self.scratch.refinenet3(path_4, layer_3_rn) path_2 = self.scratch.refinenet2(path_3, layer_2_rn) path_1 = self.scratch.refinenet1(path_2, layer_1_rn) out = self.scratch.output_conv(path_1) return torch.squeeze(out, dim=1) # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/base_models/midas_repo/midas/midas_net_custom.py """""" import torch import torch.nn as nn from .base_model import BaseModel from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder class MidasNet_small(BaseModel): def __init__(self, path=None, features=64, backbone='efficientnet_lite3', non_negative=True, exportable=True, channels_last=False, align_corners=True, blocks={'expand': True}): print('Loading weights: ', path) super(MidasNet_small, self).__init__() use_pretrained = False if path else True self.channels_last = channels_last self.blocks = blocks self.backbone = backbone self.groups = 1 features1 = features features2 = features features3 = features features4 = features self.expand = False if 'expand' in self.blocks and self.blocks['expand'] == True: self.expand = True features1 = features features2 = features * 2 features3 = features * 4 features4 = features * 8 (self.pretrained, self.scratch) = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable) self.scratch.activation = nn.ReLU(False) self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners) self.scratch.output_conv = nn.Sequential(nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1, groups=self.groups), Interpolate(scale_factor=2, mode='bilinear'), nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1), self.scratch.activation, nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), nn.ReLU(True) if non_negative else nn.Identity(), nn.Identity()) if path: self.load(path) def forward(self, x): if self.channels_last == True: print('self.channels_last = ', self.channels_last) x.contiguous(memory_format=torch.channels_last) layer_1 = self.pretrained.layer1(x) layer_2 = self.pretrained.layer2(layer_1) layer_3 = self.pretrained.layer3(layer_2) layer_4 = self.pretrained.layer4(layer_3) layer_1_rn = self.scratch.layer1_rn(layer_1) layer_2_rn = self.scratch.layer2_rn(layer_2) layer_3_rn = self.scratch.layer3_rn(layer_3) layer_4_rn = self.scratch.layer4_rn(layer_4) path_4 = self.scratch.refinenet4(layer_4_rn) path_3 = self.scratch.refinenet3(path_4, layer_3_rn) path_2 = self.scratch.refinenet2(path_3, layer_2_rn) path_1 = self.scratch.refinenet1(path_2, layer_1_rn) out = self.scratch.output_conv(path_1) return torch.squeeze(out, dim=1) def fuse_model(m): prev_previous_type = nn.Identity() prev_previous_name = '' previous_type = nn.Identity() previous_name = '' for (name, module) in m.named_modules(): if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and (type(module) == nn.ReLU): torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True) elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d: torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True) prev_previous_type = previous_type prev_previous_name = previous_name previous_type = type(module) previous_name = name # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/base_models/midas_repo/midas/model_loader.py import cv2 import torch from midas.dpt_depth import DPTDepthModel from midas.midas_net import MidasNet from midas.midas_net_custom import MidasNet_small from midas.transforms import Resize, NormalizeImage, PrepareForNet from torchvision.transforms import Compose default_models = {'dpt_beit_large_512': 'weights/dpt_beit_large_512.pt', 'dpt_beit_large_384': 'weights/dpt_beit_large_384.pt', 'dpt_beit_base_384': 'weights/dpt_beit_base_384.pt', 'dpt_swin2_large_384': 'weights/dpt_swin2_large_384.pt', 'dpt_swin2_base_384': 'weights/dpt_swin2_base_384.pt', 'dpt_swin2_tiny_256': 'weights/dpt_swin2_tiny_256.pt', 'dpt_swin_large_384': 'weights/dpt_swin_large_384.pt', 'dpt_next_vit_large_384': 'weights/dpt_next_vit_large_384.pt', 'dpt_levit_224': 'weights/dpt_levit_224.pt', 'dpt_large_384': 'weights/dpt_large_384.pt', 'dpt_hybrid_384': 'weights/dpt_hybrid_384.pt', 'midas_v21_384': 'weights/midas_v21_384.pt', 'midas_v21_small_256': 'weights/midas_v21_small_256.pt', 'openvino_midas_v21_small_256': 'weights/openvino_midas_v21_small_256.xml'} def load_model(device, model_path, model_type='dpt_large_384', optimize=True, height=None, square=False): if 'openvino' in model_type: from openvino.runtime import Core keep_aspect_ratio = not square if model_type == 'dpt_beit_large_512': model = DPTDepthModel(path=model_path, backbone='beitl16_512', non_negative=True) (net_w, net_h) = (512, 512) resize_mode = 'minimal' normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == 'dpt_beit_large_384': model = DPTDepthModel(path=model_path, backbone='beitl16_384', non_negative=True) (net_w, net_h) = (384, 384) resize_mode = 'minimal' normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == 'dpt_beit_base_384': model = DPTDepthModel(path=model_path, backbone='beitb16_384', non_negative=True) (net_w, net_h) = (384, 384) resize_mode = 'minimal' normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == 'dpt_swin2_large_384': model = DPTDepthModel(path=model_path, backbone='swin2l24_384', non_negative=True) (net_w, net_h) = (384, 384) keep_aspect_ratio = False resize_mode = 'minimal' normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == 'dpt_swin2_base_384': model = DPTDepthModel(path=model_path, backbone='swin2b24_384', non_negative=True) (net_w, net_h) = (384, 384) keep_aspect_ratio = False resize_mode = 'minimal' normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == 'dpt_swin2_tiny_256': model = DPTDepthModel(path=model_path, backbone='swin2t16_256', non_negative=True) (net_w, net_h) = (256, 256) keep_aspect_ratio = False resize_mode = 'minimal' normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == 'dpt_swin_large_384': model = DPTDepthModel(path=model_path, backbone='swinl12_384', non_negative=True) (net_w, net_h) = (384, 384) keep_aspect_ratio = False resize_mode = 'minimal' normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == 'dpt_next_vit_large_384': model = DPTDepthModel(path=model_path, backbone='next_vit_large_6m', non_negative=True) (net_w, net_h) = (384, 384) resize_mode = 'minimal' normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == 'dpt_levit_224': model = DPTDepthModel(path=model_path, backbone='levit_384', non_negative=True, head_features_1=64, head_features_2=8) (net_w, net_h) = (224, 224) keep_aspect_ratio = False resize_mode = 'minimal' normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == 'dpt_large_384': model = DPTDepthModel(path=model_path, backbone='vitl16_384', non_negative=True) (net_w, net_h) = (384, 384) resize_mode = 'minimal' normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == 'dpt_hybrid_384': model = DPTDepthModel(path=model_path, backbone='vitb_rn50_384', non_negative=True) (net_w, net_h) = (384, 384) resize_mode = 'minimal' normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) elif model_type == 'midas_v21_384': model = MidasNet(model_path, non_negative=True) (net_w, net_h) = (384, 384) resize_mode = 'upper_bound' normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) elif model_type == 'midas_v21_small_256': model = MidasNet_small(model_path, features=64, backbone='efficientnet_lite3', exportable=True, non_negative=True, blocks={'expand': True}) (net_w, net_h) = (256, 256) resize_mode = 'upper_bound' normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) elif model_type == 'openvino_midas_v21_small_256': ie = Core() uncompiled_model = ie.read_model(model=model_path) model = ie.compile_model(uncompiled_model, 'CPU') (net_w, net_h) = (256, 256) resize_mode = 'upper_bound' normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) else: print(f"model_type '{model_type}' not implemented, use: --model_type large") assert False if not 'openvino' in model_type: print('Model loaded, number of parameters = {:.0f}M'.format(sum((p.numel() for p in model.parameters())) / 1000000.0)) else: print('Model loaded, optimized with OpenVINO') if 'openvino' in model_type: keep_aspect_ratio = False if height is not None: (net_w, net_h) = (height, height) transform = Compose([Resize(net_w, net_h, resize_target=None, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=32, resize_method=resize_mode, image_interpolation_method=cv2.INTER_CUBIC), normalization, PrepareForNet()]) if not 'openvino' in model_type: model.eval() if optimize and device == torch.device('cuda'): if not 'openvino' in model_type: model = model.to(memory_format=torch.channels_last) model = model.half() else: print('Error: OpenVINO models are already optimized. No optimization to half-float possible.') exit() if not 'openvino' in model_type: model.to(device) return (model, transform, net_w, net_h) # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/base_models/midas_repo/midas/transforms.py import numpy as np import cv2 import math def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA): shape = list(sample['disparity'].shape) if shape[0] >= size[0] and shape[1] >= size[1]: return sample scale = [0, 0] scale[0] = size[0] / shape[0] scale[1] = size[1] / shape[1] scale = max(scale) shape[0] = math.ceil(scale * shape[0]) shape[1] = math.ceil(scale * shape[1]) sample['image'] = cv2.resize(sample['image'], tuple(shape[::-1]), interpolation=image_interpolation_method) sample['disparity'] = cv2.resize(sample['disparity'], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST) sample['mask'] = cv2.resize(sample['mask'].astype(np.float32), tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST) sample['mask'] = sample['mask'].astype(bool) return tuple(shape) class Resize(object): def __init__(self, width, height, resize_target=True, keep_aspect_ratio=False, ensure_multiple_of=1, resize_method='lower_bound', image_interpolation_method=cv2.INTER_AREA): self.__width = width self.__height = height self.__resize_target = resize_target self.__keep_aspect_ratio = keep_aspect_ratio self.__multiple_of = ensure_multiple_of self.__resize_method = resize_method self.__image_interpolation_method = image_interpolation_method def constrain_to_multiple_of(self, x, min_val=0, max_val=None): y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int) if max_val is not None and y > max_val: y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int) if y < min_val: y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int) return y def get_size(self, width, height): scale_height = self.__height / height scale_width = self.__width / width if self.__keep_aspect_ratio: if self.__resize_method == 'lower_bound': if scale_width > scale_height: scale_height = scale_width else: scale_width = scale_height elif self.__resize_method == 'upper_bound': if scale_width < scale_height: scale_height = scale_width else: scale_width = scale_height elif self.__resize_method == 'minimal': if abs(1 - scale_width) < abs(1 - scale_height): scale_height = scale_width else: scale_width = scale_height else: raise ValueError(f'resize_method {self.__resize_method} not implemented') if self.__resize_method == 'lower_bound': new_height = self.constrain_to_multiple_of(scale_height * height, min_val=self.__height) new_width = self.constrain_to_multiple_of(scale_width * width, min_val=self.__width) elif self.__resize_method == 'upper_bound': new_height = self.constrain_to_multiple_of(scale_height * height, max_val=self.__height) new_width = self.constrain_to_multiple_of(scale_width * width, max_val=self.__width) elif self.__resize_method == 'minimal': new_height = self.constrain_to_multiple_of(scale_height * height) new_width = self.constrain_to_multiple_of(scale_width * width) else: raise ValueError(f'resize_method {self.__resize_method} not implemented') return (new_width, new_height) def __call__(self, sample): (width, height) = self.get_size(sample['image'].shape[1], sample['image'].shape[0]) sample['image'] = cv2.resize(sample['image'], (width, height), interpolation=self.__image_interpolation_method) if self.__resize_target: if 'disparity' in sample: sample['disparity'] = cv2.resize(sample['disparity'], (width, height), interpolation=cv2.INTER_NEAREST) if 'depth' in sample: sample['depth'] = cv2.resize(sample['depth'], (width, height), interpolation=cv2.INTER_NEAREST) sample['mask'] = cv2.resize(sample['mask'].astype(np.float32), (width, height), interpolation=cv2.INTER_NEAREST) sample['mask'] = sample['mask'].astype(bool) return sample class NormalizeImage(object): def __init__(self, mean, std): self.__mean = mean self.__std = std def __call__(self, sample): sample['image'] = (sample['image'] - self.__mean) / self.__std return sample class PrepareForNet(object): def __init__(self): pass def __call__(self, sample): image = np.transpose(sample['image'], (2, 0, 1)) sample['image'] = np.ascontiguousarray(image).astype(np.float32) if 'mask' in sample: sample['mask'] = sample['mask'].astype(np.float32) sample['mask'] = np.ascontiguousarray(sample['mask']) if 'disparity' in sample: disparity = sample['disparity'].astype(np.float32) sample['disparity'] = np.ascontiguousarray(disparity) if 'depth' in sample: depth = sample['depth'].astype(np.float32) sample['depth'] = np.ascontiguousarray(depth) return sample # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/builder.py from importlib import import_module from .depth_model import DepthModel def build_model(config) -> DepthModel: module_name = f'zoedepth.models.{config.model}' try: module = import_module(module_name) except ModuleNotFoundError as e: print(e) raise ValueError(f'Model {config.model} not found. Refer above error for details.') from e try: get_version = getattr(module, 'get_version') except AttributeError as e: raise ValueError(f'Model {config.model} has no get_version function.') from e return get_version(config.version_name).build_from_config(config) # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/depth_model.py import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torchvision import transforms import PIL.Image from PIL import Image from typing import Union class DepthModel(nn.Module): def __init__(self): super().__init__() self.device = 'cpu' def to(self, device) -> nn.Module: self.device = device return super().to(device) def forward(self, x, *args, **kwargs): raise NotImplementedError def _infer(self, x: torch.Tensor): return self(x)['metric_depth'] def _infer_with_pad_aug(self, x: torch.Tensor, pad_input: bool=True, fh: float=3, fw: float=3, upsampling_mode: str='bicubic', padding_mode='reflect', **kwargs) -> torch.Tensor: assert x.dim() == 4, 'x must be 4 dimensional, got {}'.format(x.dim()) assert x.shape[1] == 3, 'x must have 3 channels, got {}'.format(x.shape[1]) if pad_input: assert fh > 0 or fw > 0, 'atlease one of fh and fw must be greater than 0' pad_h = int(np.sqrt(x.shape[2] / 2) * fh) pad_w = int(np.sqrt(x.shape[3] / 2) * fw) padding = [pad_w, pad_w] if pad_h > 0: padding += [pad_h, pad_h] x = F.pad(x, padding, mode=padding_mode, **kwargs) out = self._infer(x) if out.shape[-2:] != x.shape[-2:]: out = F.interpolate(out, size=(x.shape[2], x.shape[3]), mode=upsampling_mode, align_corners=False) if pad_input: if pad_h > 0: out = out[:, :, pad_h:-pad_h, :] if pad_w > 0: out = out[:, :, :, pad_w:-pad_w] return out def infer_with_flip_aug(self, x, pad_input: bool=True, **kwargs) -> torch.Tensor: out = self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs) out_flip = self._infer_with_pad_aug(torch.flip(x, dims=[3]), pad_input=pad_input, **kwargs) out = (out + torch.flip(out_flip, dims=[3])) / 2 return out def infer(self, x, pad_input: bool=True, with_flip_aug: bool=True, **kwargs) -> torch.Tensor: if with_flip_aug: return self.infer_with_flip_aug(x, pad_input=pad_input, **kwargs) else: return self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs) @torch.no_grad() def infer_pil(self, pil_img, pad_input: bool=True, with_flip_aug: bool=True, output_type: str='numpy', **kwargs) -> Union[np.ndarray, PIL.Image.Image, torch.Tensor]: x = transforms.ToTensor()(pil_img).unsqueeze(0).to(self.device) out_tensor = self.infer(x, pad_input=pad_input, with_flip_aug=with_flip_aug, **kwargs) if output_type == 'numpy': return out_tensor.squeeze().cpu().numpy() elif output_type == 'pil': out_16bit_numpy = (out_tensor.squeeze().cpu().numpy() * 256).astype(np.uint16) return Image.fromarray(out_16bit_numpy) elif output_type == 'tensor': return out_tensor.squeeze().cpu() else: raise ValueError(f"output_type {output_type} not supported. Supported values are 'numpy', 'pil' and 'tensor'") # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/layers/attractor.py import torch import torch.nn as nn @torch.jit.script def exp_attractor(dx, alpha: float=300, gamma: int=2): return torch.exp(-alpha * torch.abs(dx) ** gamma) * dx @torch.jit.script def inv_attractor(dx, alpha: float=300, gamma: int=2): return dx.div(1 + alpha * dx.pow(gamma)) class AttractorLayer(nn.Module): def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=0.001, max_depth=10, alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False): super().__init__() self.n_attractors = n_attractors self.n_bins = n_bins self.min_depth = min_depth self.max_depth = max_depth self.alpha = alpha self.gamma = gamma self.kind = kind self.attractor_type = attractor_type self.memory_efficient = memory_efficient self._net = nn.Sequential(nn.Conv2d(in_features, mlp_dim, 1, 1, 0), nn.ReLU(inplace=True), nn.Conv2d(mlp_dim, n_attractors * 2, 1, 1, 0), nn.ReLU(inplace=True)) def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False): if prev_b_embedding is not None: if interpolate: prev_b_embedding = nn.functional.interpolate(prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True) x = x + prev_b_embedding A = self._net(x) eps = 0.001 A = A + eps (n, c, h, w) = A.shape A = A.view(n, self.n_attractors, 2, h, w) A_normed = A / A.sum(dim=2, keepdim=True) A_normed = A[:, :, 0, ...] b_prev = nn.functional.interpolate(b_prev, (h, w), mode='bilinear', align_corners=True) b_centers = b_prev if self.attractor_type == 'exp': dist = exp_attractor else: dist = inv_attractor if not self.memory_efficient: func = {'mean': torch.mean, 'sum': torch.sum}[self.kind] delta_c = func(dist(A_normed.unsqueeze(2) - b_centers.unsqueeze(1)), dim=1) else: delta_c = torch.zeros_like(b_centers, device=b_centers.device) for i in range(self.n_attractors): delta_c += dist(A_normed[:, i, ...].unsqueeze(1) - b_centers) if self.kind == 'mean': delta_c = delta_c / self.n_attractors b_new_centers = b_centers + delta_c B_centers = (self.max_depth - self.min_depth) * b_new_centers + self.min_depth (B_centers, _) = torch.sort(B_centers, dim=1) B_centers = torch.clip(B_centers, self.min_depth, self.max_depth) return (b_new_centers, B_centers) class AttractorLayerUnnormed(nn.Module): def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=0.001, max_depth=10, alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False): super().__init__() self.n_attractors = n_attractors self.n_bins = n_bins self.min_depth = min_depth self.max_depth = max_depth self.alpha = alpha self.gamma = gamma self.kind = kind self.attractor_type = attractor_type self.memory_efficient = memory_efficient self._net = nn.Sequential(nn.Conv2d(in_features, mlp_dim, 1, 1, 0), nn.ReLU(inplace=True), nn.Conv2d(mlp_dim, n_attractors, 1, 1, 0), nn.Softplus()) def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False): if prev_b_embedding is not None: if interpolate: prev_b_embedding = nn.functional.interpolate(prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True) x = x + prev_b_embedding A = self._net(x) (n, c, h, w) = A.shape b_prev = nn.functional.interpolate(b_prev, (h, w), mode='bilinear', align_corners=True) b_centers = b_prev if self.attractor_type == 'exp': dist = exp_attractor else: dist = inv_attractor if not self.memory_efficient: func = {'mean': torch.mean, 'sum': torch.sum}[self.kind] delta_c = func(dist(A.unsqueeze(2) - b_centers.unsqueeze(1)), dim=1) else: delta_c = torch.zeros_like(b_centers, device=b_centers.device) for i in range(self.n_attractors): delta_c += dist(A[:, i, ...].unsqueeze(1) - b_centers) if self.kind == 'mean': delta_c = delta_c / self.n_attractors b_new_centers = b_centers + delta_c B_centers = b_new_centers return (b_new_centers, B_centers) # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/layers/dist_layers.py import torch import torch.nn as nn def log_binom(n, k, eps=1e-07): n = n + eps k = k + eps return n * torch.log(n) - k * torch.log(k) - (n - k) * torch.log(n - k + eps) class LogBinomial(nn.Module): def __init__(self, n_classes=256, act=torch.softmax): super().__init__() self.K = n_classes self.act = act self.register_buffer('k_idx', torch.arange(0, n_classes).view(1, -1, 1, 1)) self.register_buffer('K_minus_1', torch.Tensor([self.K - 1]).view(1, -1, 1, 1)) def forward(self, x, t=1.0, eps=0.0001): if x.ndim == 3: x = x.unsqueeze(1) one_minus_x = torch.clamp(1 - x, eps, 1) x = torch.clamp(x, eps, 1) y = log_binom(self.K_minus_1, self.k_idx) + self.k_idx * torch.log(x) + (self.K - 1 - self.k_idx) * torch.log(one_minus_x) return self.act(y / t, dim=1) class ConditionalLogBinomial(nn.Module): def __init__(self, in_features, condition_dim, n_classes=256, bottleneck_factor=2, p_eps=0.0001, max_temp=50, min_temp=1e-07, act=torch.softmax): super().__init__() self.p_eps = p_eps self.max_temp = max_temp self.min_temp = min_temp self.log_binomial_transform = LogBinomial(n_classes, act=act) bottleneck = (in_features + condition_dim) // bottleneck_factor self.mlp = nn.Sequential(nn.Conv2d(in_features + condition_dim, bottleneck, kernel_size=1, stride=1, padding=0), nn.GELU(), nn.Conv2d(bottleneck, 2 + 2, kernel_size=1, stride=1, padding=0), nn.Softplus()) def forward(self, x, cond): pt = self.mlp(torch.concat((x, cond), dim=1)) (p, t) = (pt[:, :2, ...], pt[:, 2:, ...]) p = p + self.p_eps p = p[:, 0, ...] / (p[:, 0, ...] + p[:, 1, ...]) t = t + self.p_eps t = t[:, 0, ...] / (t[:, 0, ...] + t[:, 1, ...]) t = t.unsqueeze(1) t = (self.max_temp - self.min_temp) * t + self.min_temp return self.log_binomial_transform(p, t) # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/layers/localbins_layers.py import torch import torch.nn as nn class SeedBinRegressor(nn.Module): def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=0.001, max_depth=10): super().__init__() self.version = '1_1' self.min_depth = min_depth self.max_depth = max_depth self._net = nn.Sequential(nn.Conv2d(in_features, mlp_dim, 1, 1, 0), nn.ReLU(inplace=True), nn.Conv2d(mlp_dim, n_bins, 1, 1, 0), nn.ReLU(inplace=True)) def forward(self, x): B = self._net(x) eps = 0.001 B = B + eps B_widths_normed = B / B.sum(dim=1, keepdim=True) B_widths = (self.max_depth - self.min_depth) * B_widths_normed B_widths = nn.functional.pad(B_widths, (0, 0, 0, 0, 1, 0), mode='constant', value=self.min_depth) B_edges = torch.cumsum(B_widths, dim=1) B_centers = 0.5 * (B_edges[:, :-1, ...] + B_edges[:, 1:, ...]) return (B_widths_normed, B_centers) class SeedBinRegressorUnnormed(nn.Module): def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=0.001, max_depth=10): super().__init__() self.version = '1_1' self._net = nn.Sequential(nn.Conv2d(in_features, mlp_dim, 1, 1, 0), nn.ReLU(inplace=True), nn.Conv2d(mlp_dim, n_bins, 1, 1, 0), nn.Softplus()) def forward(self, x): B_centers = self._net(x) return (B_centers, B_centers) class Projector(nn.Module): def __init__(self, in_features, out_features, mlp_dim=128): super().__init__() self._net = nn.Sequential(nn.Conv2d(in_features, mlp_dim, 1, 1, 0), nn.ReLU(inplace=True), nn.Conv2d(mlp_dim, out_features, 1, 1, 0)) def forward(self, x): return self._net(x) class LinearSplitter(nn.Module): def __init__(self, in_features, prev_nbins, split_factor=2, mlp_dim=128, min_depth=0.001, max_depth=10): super().__init__() self.prev_nbins = prev_nbins self.split_factor = split_factor self.min_depth = min_depth self.max_depth = max_depth self._net = nn.Sequential(nn.Conv2d(in_features, mlp_dim, 1, 1, 0), nn.GELU(), nn.Conv2d(mlp_dim, prev_nbins * split_factor, 1, 1, 0), nn.ReLU()) def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False): if prev_b_embedding is not None: if interpolate: prev_b_embedding = nn.functional.interpolate(prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True) x = x + prev_b_embedding S = self._net(x) eps = 0.001 S = S + eps (n, c, h, w) = S.shape S = S.view(n, self.prev_nbins, self.split_factor, h, w) S_normed = S / S.sum(dim=2, keepdim=True) b_prev = nn.functional.interpolate(b_prev, (h, w), mode='bilinear', align_corners=True) b_prev = b_prev / b_prev.sum(dim=1, keepdim=True) b = b_prev.unsqueeze(2) * S_normed b = b.flatten(1, 2) B_widths = (self.max_depth - self.min_depth) * b B_widths = nn.functional.pad(B_widths, (0, 0, 0, 0, 1, 0), mode='constant', value=self.min_depth) B_edges = torch.cumsum(B_widths, dim=1) B_centers = 0.5 * (B_edges[:, :-1, ...] + B_edges[:, 1:, ...]) return (b, B_centers) # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/layers/patch_transformer.py import torch import torch.nn as nn class PatchTransformerEncoder(nn.Module): def __init__(self, in_channels, patch_size=10, embedding_dim=128, num_heads=4, use_class_token=False): super(PatchTransformerEncoder, self).__init__() self.use_class_token = use_class_token encoder_layers = nn.TransformerEncoderLayer(embedding_dim, num_heads, dim_feedforward=1024) self.transformer_encoder = nn.TransformerEncoder(encoder_layers, num_layers=4) self.embedding_convPxP = nn.Conv2d(in_channels, embedding_dim, kernel_size=patch_size, stride=patch_size, padding=0) def positional_encoding_1d(self, sequence_length, batch_size, embedding_dim, device='cpu'): position = torch.arange(0, sequence_length, dtype=torch.float32, device=device).unsqueeze(1) index = torch.arange(0, embedding_dim, 2, dtype=torch.float32, device=device).unsqueeze(0) div_term = torch.exp(index * (-torch.log(torch.tensor(10000.0, device=device)) / embedding_dim)) pos_encoding = position * div_term pos_encoding = torch.cat([torch.sin(pos_encoding), torch.cos(pos_encoding)], dim=1) pos_encoding = pos_encoding.unsqueeze(1).repeat(1, batch_size, 1) return pos_encoding def forward(self, x): embeddings = self.embedding_convPxP(x).flatten(2) if self.use_class_token: embeddings = nn.functional.pad(embeddings, (1, 0)) embeddings = embeddings.permute(2, 0, 1) (S, N, E) = embeddings.shape embeddings = embeddings + self.positional_encoding_1d(S, N, E, device=embeddings.device) x = self.transformer_encoder(embeddings) return x # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/model_io.py import torch def load_state_dict(model, state_dict): state_dict = state_dict.get('model', state_dict) do_prefix = isinstance(model, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)) state = {} for (k, v) in state_dict.items(): if k.startswith('module.') and (not do_prefix): k = k[7:] if not k.startswith('module.') and do_prefix: k = 'module.' + k state[k] = v model.load_state_dict(state) print('Loaded successfully') return model def load_wts(model, checkpoint_path): ckpt = torch.load(checkpoint_path, map_location='cpu') return load_state_dict(model, ckpt) def load_state_dict_from_url(model, url, **kwargs): state_dict = torch.hub.load_state_dict_from_url(url, map_location='cpu', **kwargs) return load_state_dict(model, state_dict) def load_state_from_resource(model, resource: str): print(f'Using pretrained resource {resource}') if resource.startswith('url::'): url = resource.split('url::')[1] return load_state_dict_from_url(model, url, progress=True) elif resource.startswith('local::'): path = resource.split('local::')[1] return load_wts(model, path) else: raise ValueError('Invalid resource type, only url:: and local:: are supported') # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/zoedepth/zoedepth_v1.py import itertools import torch import torch.nn as nn from ..depth_model import DepthModel from ..base_models.midas import MidasCore from ..layers.attractor import AttractorLayer, AttractorLayerUnnormed from ..layers.dist_layers import ConditionalLogBinomial from ..layers.localbins_layers import Projector, SeedBinRegressor, SeedBinRegressorUnnormed from ..model_io import load_state_from_resource class ZoeDepth(DepthModel): def __init__(self, core, n_bins=64, bin_centers_type='softplus', bin_embedding_dim=128, min_depth=0.001, max_depth=10, n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, train_midas=True, midas_lr_factor=10, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs): super().__init__() self.core = core self.max_depth = max_depth self.min_depth = min_depth self.min_temp = min_temp self.bin_centers_type = bin_centers_type self.midas_lr_factor = midas_lr_factor self.encoder_lr_factor = encoder_lr_factor self.pos_enc_lr_factor = pos_enc_lr_factor self.train_midas = train_midas self.inverse_midas = inverse_midas if self.encoder_lr_factor <= 0: self.core.freeze_encoder(freeze_rel_pos=self.pos_enc_lr_factor <= 0) N_MIDAS_OUT = 32 btlnck_features = self.core.output_channels[0] num_out_features = self.core.output_channels[1:] self.conv2 = nn.Conv2d(btlnck_features, btlnck_features, kernel_size=1, stride=1, padding=0) if bin_centers_type == 'normed': SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayer elif bin_centers_type == 'softplus': SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayerUnnormed elif bin_centers_type == 'hybrid1': SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayerUnnormed elif bin_centers_type == 'hybrid2': SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayer else: raise ValueError("bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'") self.seed_bin_regressor = SeedBinRegressorLayer(btlnck_features, n_bins=n_bins, min_depth=min_depth, max_depth=max_depth) self.seed_projector = Projector(btlnck_features, bin_embedding_dim) self.projectors = nn.ModuleList([Projector(num_out, bin_embedding_dim) for num_out in num_out_features]) self.attractors = nn.ModuleList([Attractor(bin_embedding_dim, n_bins, n_attractors=n_attractors[i], min_depth=min_depth, max_depth=max_depth, alpha=attractor_alpha, gamma=attractor_gamma, kind=attractor_kind, attractor_type=attractor_type) for i in range(len(num_out_features))]) last_in = N_MIDAS_OUT + 1 self.conditional_log_binomial = ConditionalLogBinomial(last_in, bin_embedding_dim, n_classes=n_bins, min_temp=min_temp, max_temp=max_temp) def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs): (b, c, h, w) = x.shape self.orig_input_width = w self.orig_input_height = h (rel_depth, out) = self.core(x, denorm=denorm, return_rel_depth=True) outconv_activation = out[0] btlnck = out[1] x_blocks = out[2:] x_d0 = self.conv2(btlnck) x = x_d0 (_, seed_b_centers) = self.seed_bin_regressor(x) if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2': b_prev = (seed_b_centers - self.min_depth) / (self.max_depth - self.min_depth) else: b_prev = seed_b_centers prev_b_embedding = self.seed_projector(x) for (projector, attractor, x) in zip(self.projectors, self.attractors, x_blocks): b_embedding = projector(x) (b, b_centers) = attractor(b_embedding, b_prev, prev_b_embedding, interpolate=True) b_prev = b.clone() prev_b_embedding = b_embedding.clone() last = outconv_activation if self.inverse_midas: rel_depth = 1.0 / (rel_depth + 1e-06) rel_depth = (rel_depth - rel_depth.min()) / (rel_depth.max() - rel_depth.min()) rel_cond = rel_depth.unsqueeze(1) rel_cond = nn.functional.interpolate(rel_cond, size=last.shape[2:], mode='bilinear', align_corners=True) last = torch.cat([last, rel_cond], dim=1) b_embedding = nn.functional.interpolate(b_embedding, last.shape[-2:], mode='bilinear', align_corners=True) x = self.conditional_log_binomial(last, b_embedding) b_centers = nn.functional.interpolate(b_centers, x.shape[-2:], mode='bilinear', align_corners=True) out = torch.sum(x * b_centers, dim=1, keepdim=True) output = dict(metric_depth=out) if return_final_centers or return_probs: output['bin_centers'] = b_centers if return_probs: output['probs'] = x return output def get_lr_params(self, lr): param_conf = [] if self.train_midas: if self.encoder_lr_factor > 0: param_conf.append({'params': self.core.get_enc_params_except_rel_pos(), 'lr': lr / self.encoder_lr_factor}) if self.pos_enc_lr_factor > 0: param_conf.append({'params': self.core.get_rel_pos_params(), 'lr': lr / self.pos_enc_lr_factor}) midas_params = self.core.core.scratch.parameters() midas_lr_factor = self.midas_lr_factor param_conf.append({'params': midas_params, 'lr': lr / midas_lr_factor}) remaining_modules = [] for (name, child) in self.named_children(): if name != 'core': remaining_modules.append(child) remaining_params = itertools.chain(*[child.parameters() for child in remaining_modules]) param_conf.append({'params': remaining_params, 'lr': lr}) return param_conf @staticmethod def build(midas_model_type='DPT_BEiT_L_384', pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs): core = MidasCore.build(midas_model_type=midas_model_type, use_pretrained_midas=use_pretrained_midas, train_midas=train_midas, fetch_features=True, freeze_bn=freeze_midas_bn, **kwargs) model = ZoeDepth(core, **kwargs) if pretrained_resource: assert isinstance(pretrained_resource, str), 'pretrained_resource must be a string' model = load_state_from_resource(model, pretrained_resource) return model @staticmethod def build_from_config(config): return ZoeDepth.build(**config) # File: controlnet_aux-master/src/controlnet_aux/zoe/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py import itertools import torch import torch.nn as nn from ..depth_model import DepthModel from ..base_models.midas import MidasCore from ..layers.attractor import AttractorLayer, AttractorLayerUnnormed from ..layers.dist_layers import ConditionalLogBinomial from ..layers.localbins_layers import Projector, SeedBinRegressor, SeedBinRegressorUnnormed from ..layers.patch_transformer import PatchTransformerEncoder from ..model_io import load_state_from_resource class ZoeDepthNK(DepthModel): def __init__(self, core, bin_conf, bin_centers_type='softplus', bin_embedding_dim=128, n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, memory_efficient=False, train_midas=True, is_midas_pretrained=True, midas_lr_factor=1, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs): super().__init__() self.core = core self.bin_conf = bin_conf self.min_temp = min_temp self.max_temp = max_temp self.memory_efficient = memory_efficient self.train_midas = train_midas self.is_midas_pretrained = is_midas_pretrained self.midas_lr_factor = midas_lr_factor self.encoder_lr_factor = encoder_lr_factor self.pos_enc_lr_factor = pos_enc_lr_factor self.inverse_midas = inverse_midas N_MIDAS_OUT = 32 btlnck_features = self.core.output_channels[0] num_out_features = self.core.output_channels[1:] self.conv2 = nn.Conv2d(btlnck_features, btlnck_features, kernel_size=1, stride=1, padding=0) self.patch_transformer = PatchTransformerEncoder(btlnck_features, 1, 128, use_class_token=True) self.mlp_classifier = nn.Sequential(nn.Linear(128, 128), nn.ReLU(), nn.Linear(128, 2)) if bin_centers_type == 'normed': SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayer elif bin_centers_type == 'softplus': SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayerUnnormed elif bin_centers_type == 'hybrid1': SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayerUnnormed elif bin_centers_type == 'hybrid2': SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayer else: raise ValueError("bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'") self.bin_centers_type = bin_centers_type self.seed_bin_regressors = nn.ModuleDict({conf['name']: SeedBinRegressorLayer(btlnck_features, conf['n_bins'], mlp_dim=bin_embedding_dim // 2, min_depth=conf['min_depth'], max_depth=conf['max_depth']) for conf in bin_conf}) self.seed_projector = Projector(btlnck_features, bin_embedding_dim, mlp_dim=bin_embedding_dim // 2) self.projectors = nn.ModuleList([Projector(num_out, bin_embedding_dim, mlp_dim=bin_embedding_dim // 2) for num_out in num_out_features]) self.attractors = nn.ModuleDict({conf['name']: nn.ModuleList([Attractor(bin_embedding_dim, n_attractors[i], mlp_dim=bin_embedding_dim, alpha=attractor_alpha, gamma=attractor_gamma, kind=attractor_kind, attractor_type=attractor_type, memory_efficient=memory_efficient, min_depth=conf['min_depth'], max_depth=conf['max_depth']) for i in range(len(n_attractors))]) for conf in bin_conf}) last_in = N_MIDAS_OUT self.conditional_log_binomial = nn.ModuleDict({conf['name']: ConditionalLogBinomial(last_in, bin_embedding_dim, conf['n_bins'], bottleneck_factor=4, min_temp=self.min_temp, max_temp=self.max_temp) for conf in bin_conf}) def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs): (b, c, h, w) = x.shape self.orig_input_width = w self.orig_input_height = h (rel_depth, out) = self.core(x, denorm=denorm, return_rel_depth=True) outconv_activation = out[0] btlnck = out[1] x_blocks = out[2:] x_d0 = self.conv2(btlnck) x = x_d0 embedding = self.patch_transformer(x)[0] domain_logits = self.mlp_classifier(embedding) domain_vote = torch.softmax(domain_logits.sum(dim=0, keepdim=True), dim=-1) bin_conf_name = ['nyu', 'kitti'][torch.argmax(domain_vote, dim=-1).squeeze().item()] try: conf = [c for c in self.bin_conf if c.name == bin_conf_name][0] except IndexError: raise ValueError(f'bin_conf_name {bin_conf_name} not found in bin_confs') min_depth = conf['min_depth'] max_depth = conf['max_depth'] seed_bin_regressor = self.seed_bin_regressors[bin_conf_name] (_, seed_b_centers) = seed_bin_regressor(x) if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2': b_prev = (seed_b_centers - min_depth) / (max_depth - min_depth) else: b_prev = seed_b_centers prev_b_embedding = self.seed_projector(x) attractors = self.attractors[bin_conf_name] for (projector, attractor, x) in zip(self.projectors, attractors, x_blocks): b_embedding = projector(x) (b, b_centers) = attractor(b_embedding, b_prev, prev_b_embedding, interpolate=True) b_prev = b prev_b_embedding = b_embedding last = outconv_activation b_centers = nn.functional.interpolate(b_centers, last.shape[-2:], mode='bilinear', align_corners=True) b_embedding = nn.functional.interpolate(b_embedding, last.shape[-2:], mode='bilinear', align_corners=True) clb = self.conditional_log_binomial[bin_conf_name] x = clb(last, b_embedding) out = torch.sum(x * b_centers, dim=1, keepdim=True) output = dict(domain_logits=domain_logits, metric_depth=out) if return_final_centers or return_probs: output['bin_centers'] = b_centers if return_probs: output['probs'] = x return output def get_lr_params(self, lr): param_conf = [] if self.train_midas: def get_rel_pos_params(): for (name, p) in self.core.core.pretrained.named_parameters(): if 'relative_position' in name: yield p def get_enc_params_except_rel_pos(): for (name, p) in self.core.core.pretrained.named_parameters(): if 'relative_position' not in name: yield p encoder_params = get_enc_params_except_rel_pos() rel_pos_params = get_rel_pos_params() midas_params = self.core.core.scratch.parameters() midas_lr_factor = self.midas_lr_factor if self.is_midas_pretrained else 1.0 param_conf.extend([{'params': encoder_params, 'lr': lr / self.encoder_lr_factor}, {'params': rel_pos_params, 'lr': lr / self.pos_enc_lr_factor}, {'params': midas_params, 'lr': lr / midas_lr_factor}]) remaining_modules = [] for (name, child) in self.named_children(): if name != 'core': remaining_modules.append(child) remaining_params = itertools.chain(*[child.parameters() for child in remaining_modules]) param_conf.append({'params': remaining_params, 'lr': lr}) return param_conf def get_conf_parameters(self, conf_name): params = [] for (name, child) in self.named_children(): if isinstance(child, nn.ModuleDict): for (bin_conf_name, module) in child.items(): if bin_conf_name == conf_name: params += list(module.parameters()) return params def freeze_conf(self, conf_name): for p in self.get_conf_parameters(conf_name): p.requires_grad = False def unfreeze_conf(self, conf_name): for p in self.get_conf_parameters(conf_name): p.requires_grad = True def freeze_all_confs(self): for (name, child) in self.named_children(): if isinstance(child, nn.ModuleDict): for (bin_conf_name, module) in child.items(): for p in module.parameters(): p.requires_grad = False @staticmethod def build(midas_model_type='DPT_BEiT_L_384', pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs): core = MidasCore.build(midas_model_type=midas_model_type, use_pretrained_midas=use_pretrained_midas, train_midas=train_midas, fetch_features=True, freeze_bn=freeze_midas_bn, **kwargs) model = ZoeDepthNK(core, **kwargs) if pretrained_resource: assert isinstance(pretrained_resource, str), 'pretrained_resource must be a string' model = load_state_from_resource(model, pretrained_resource) return model @staticmethod def build_from_config(config): return ZoeDepthNK.build(**config)