"""
This file contains functions used for image preprocessing and data augmentation.
"""

import io

import cv2
import numpy as np
import torch
from PIL import Image
from rembg.bg import remove
from torchvision import transforms
from torchvision.models import detection

from lib.common.cloth_extraction import load_segmentation
from lib.pymaf.core import constants
from lib.pymaf.utils.streamer import aug_matrix


def load_img(img_file):
    """Load an image and return it with RGB channel order."""

    img = cv2.imread(img_file, cv2.IMREAD_UNCHANGED)

    # Promote grayscale images to three channels.
    if len(img.shape) == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

    if img.shape[2] == 4:
        # Images with an alpha channel (typically PNGs) are read as BGRA;
        # drop the alpha channel and reorder to RGB.
        img = cv2.cvtColor(img, cv2.COLOR_RGBA2BGR)
    else:
        # cv2 reads BGR; reorder to RGB.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    return img


def get_bbox(img, det):
    """Run the bounding-box detector `det` on `img` and return one averaged box."""

    inp = np.float32(img)
    # Normalize to [-1, 1] and reorder HWC -> CHW.
    inp = (inp / 255.0 - (0.5, 0.5, 0.5)) / (0.5, 0.5, 0.5)
    inp = inp.transpose(2, 0, 1)
    bboxes, probs = det(torch.from_numpy(inp).float().unsqueeze(0))

    # Confidence-weighted average of the candidate boxes.
    probs = probs.unsqueeze(3)
    bboxes = (bboxes * probs).sum(dim=1, keepdim=True) / probs.sum(
        dim=1, keepdim=True)
    bbox = bboxes[0, 0, 0].cpu().numpy()

    return bbox


def get_transformer(input_res):
    """Build the torchvision transforms used by the different backbones."""

    image_to_tensor = transforms.Compose([
        transforms.Resize(input_res),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    mask_to_tensor = transforms.Compose([
        transforms.Resize(input_res),
        transforms.ToTensor(),
        transforms.Normalize((0.0, ), (1.0, ))
    ])

    image_to_pymaf_tensor = transforms.Compose([
        transforms.Resize(size=224),
        transforms.Normalize(mean=constants.IMG_NORM_MEAN,
                             std=constants.IMG_NORM_STD)
    ])

    image_to_pixie_tensor = transforms.Compose([transforms.Resize(224)])

    def image_to_hybrik_tensor(img):
        # HybrIK expects (img - mean) / std with its own per-channel statistics.
        img[0].add_(-0.406)
        img[1].add_(-0.457)
        img[2].add_(-0.480)

        img[0].div_(0.225)
        img[1].div_(0.224)
        img[2].div_(0.229)
        return img

    return [
        image_to_tensor, mask_to_tensor, image_to_pymaf_tensor,
        image_to_pixie_tensor, image_to_hybrik_tensor
    ]


def process_image(img_file,
                  hps_type,
                  input_res=512,
                  device=None,
                  seg_path=None):
    """Read an image, preprocess it, and crop it around the detected person.

    A pretrained Mask R-CNN localizes the person and provides the bounding box.
    The crop is matted with rembg and converted into the image/mask tensors used
    downstream, plus the tensor expected by the selected HPS backbone
    (`hps_type`). If `seg_path` is given, the cloth-segmentation polygons are
    warped and normalized to the same crop.
    """

    [
        image_to_tensor, mask_to_tensor, image_to_pymaf_tensor,
        image_to_pixie_tensor, image_to_hybrik_tensor
    ] = get_transformer(input_res)

    img_ori = load_img(img_file)

    in_height, in_width, _ = img_ori.shape
    M = aug_matrix(in_width, in_height, input_res * 2, input_res * 2)

    # Pad and resize the original image onto a square working canvas.
    img_for_crop = cv2.warpAffine(img_ori,
                                  M[0:2, :], (input_res * 2, input_res * 2),
                                  flags=cv2.INTER_CUBIC)

    # Detect the person and keep the highest-scoring human box.
    detector = detection.maskrcnn_resnet50_fpn(pretrained=True)
    detector.eval()
    predictions = detector(
        [torch.from_numpy(img_for_crop).permute(2, 0, 1) / 255.])[0]
    human_ids = torch.logical_and(
        predictions["labels"] == 1,
        predictions["scores"] == predictions["scores"].max()).nonzero().squeeze(1)
    bbox = predictions["boxes"][human_ids, :].flatten().detach().cpu().numpy()

    width = bbox[2] - bbox[0]
    height = bbox[3] - bbox[1]
    center = np.array([(bbox[0] + bbox[2]) / 2.0,
                       (bbox[1] + bbox[3]) / 2.0])

    scale = max(height, width) / 180

    if hps_type == 'hybrik':
        img_np = crop_for_hybrik(img_for_crop, center,
                                 np.array([scale * 180, scale * 180]))
    else:
        img_np, cropping_parameters = crop(img_for_crop, center, scale,
                                           (input_res, input_res))

    # Remove the background with rembg to obtain an RGBA crop.
    with torch.no_grad():
        buf = io.BytesIO()
        Image.fromarray(img_np).save(buf, format='png')
        img_pil = Image.open(io.BytesIO(remove(
            buf.getvalue()))).convert("RGBA")

    # Background-masked RGB tensor in [-1, 1]; the mask comes from the alpha channel.
    img_rgb = image_to_tensor(img_pil.convert("RGB"))
    img_mask = torch.tensor(1.0) - (mask_to_tensor(img_pil.split()[-1]) <
                                    torch.tensor(0.5)).float()
    img_tensor = img_rgb * img_mask

    # Tensor for the HPS backbone.
    img_hps = img_np.astype(np.float32) / 255.
    img_hps = torch.from_numpy(img_hps).permute(2, 0, 1)

    if hps_type == 'bev':
        img_hps = img_np[:, :, [2, 1, 0]]
    elif hps_type == 'hybrik':
        img_hps = image_to_hybrik_tensor(img_hps).unsqueeze(0).to(device)
    elif hps_type != 'pixie':
        img_hps = image_to_pymaf_tensor(img_hps).unsqueeze(0).to(device)
    else:
        img_hps = image_to_pixie_tensor(img_hps).unsqueeze(0).to(device)

    # Everything needed to map the crop back onto the original image.
    uncrop_param = {
        'center': center,
        'scale': scale,
        'ori_shape': img_ori.shape,
        'box_shape': img_np.shape,
        'crop_shape': img_for_crop.shape,
        'M': M
    }

    if seg_path is not None:
        segmentations = load_segmentation(seg_path, (in_height, in_width))
        seg_coord_normalized = []
        for seg in segmentations:
            coord_normalized = []
            for xy in seg['coordinates']:
                # Warp the polygon with the same affine matrix as the image.
                xy_h = np.vstack((xy[:, 0], xy[:, 1], np.ones(len(xy)))).T
                warped_indices = M[0:2, :] @ xy_h[:, :, None]
                warped_indices = np.array(warped_indices).astype(int)
                warped_indices.resize((warped_indices.shape[:2]))

                cropped_indices = crop_segmentation(warped_indices,
                                                    (input_res, input_res),
                                                    cropping_parameters)

                indices = np.vstack(
                    (cropped_indices[:, 0], cropped_indices[:, 1])).T

                # Normalize the coordinates to [-1, 1] within the crop.
                seg_cropped_normalized = 2 * (indices / input_res) - 1
                seg_cropped_normalized[:, 0] = (1 / 40) * seg_cropped_normalized[:, 0]
                seg_cropped_normalized[:, 1] = (1 / 50) * seg_cropped_normalized[:, 1]
                coord_normalized.append(seg_cropped_normalized)

            seg['coord_normalized'] = coord_normalized
            seg_coord_normalized.append(seg)

        return img_tensor, img_hps, img_ori, img_mask, uncrop_param, seg_coord_normalized

    return img_tensor, img_hps, img_ori, img_mask, uncrop_param


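# Usage sketch (illustrative only; the file path and backbone name below are
# placeholders, not part of this module):
#
#   img_tensor, img_hps, img_ori, img_mask, uncrop_param = process_image(
#       "examples/person.png", hps_type="pymaf", input_res=512,
#       device=torch.device("cuda:0"))
#
#   # img_tensor: (3, 512, 512) background-masked RGB crop in [-1, 1]
#   # img_hps:    (1, 3, 224, 224) normalized input for the chosen HPS backbone

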
def get_transform(center, scale, res):
    """Generate transformation matrix."""
    h = 200 * scale
    t = np.zeros((3, 3))
    t[0, 0] = float(res[1]) / h
    t[1, 1] = float(res[0]) / h
    t[0, 2] = res[1] * (-float(center[0]) / h + .5)
    t[1, 2] = res[0] * (-float(center[1]) / h + .5)
    t[2, 2] = 1

    return t


def transform(pt, center, scale, res, invert=0):
    """Transform pixel location to different reference."""
    t = get_transform(center, scale, res)
    if invert:
        t = np.linalg.inv(t)
    new_pt = np.array([pt[0] - 1, pt[1] - 1, 1.]).T
    new_pt = np.dot(t, new_pt)
    return np.around(new_pt[:2]).astype(np.int16)


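# Worked example (sketch): with center = np.array([256., 256.]), scale = 1.0 and
# res = (224, 224), the crop covers 200 * scale = 200 source pixels per side, so
#
#   transform([0, 0], center, 1.0, (224, 224), invert=1)      # -> approx. [155, 155]
#   transform([224, 224], center, 1.0, (224, 224), invert=1)  # -> approx. [355, 355]
#
# i.e. the 224 x 224 output window corresponds to a 200 x 200 box around
# `center` in the source image (up to the rounding above).

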
def crop(img, center, scale, res):
    """Crop image according to the supplied bounding box."""

    # Upper-left point of the crop window in the source image.
    ul = np.array(transform([0, 0], center, scale, res, invert=1))
    # Bottom-right point of the crop window in the source image.
    br = np.array(transform(res, center, scale, res, invert=1))

    new_shape = [br[1] - ul[1], br[0] - ul[0]]
    if len(img.shape) > 2:
        new_shape += [img.shape[2]]
    new_img = np.zeros(new_shape)

    # Range to fill in the new array.
    new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]
    new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]

    # Range to sample from the original image.
    old_x = max(0, ul[0]), min(len(img[0]), br[0])
    old_y = max(0, ul[1]), min(len(img), br[1])

    new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1],
                                                        old_x[0]:old_x[1]]
    if len(img.shape) == 2:
        new_img = np.array(Image.fromarray(new_img).resize(res))
    else:
        new_img = np.array(
            Image.fromarray(new_img.astype(np.uint8)).resize(res))

    return new_img, (old_x, new_x, old_y, new_y, new_shape)


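# Usage sketch (illustrative): cut a 224 x 224 patch around a detected person;
# `img_for_crop`, `center` and `scale` are assumed to come from
# process_image-style preprocessing.
#
#   patch, crop_params = crop(img_for_crop, center, scale, (224, 224))
#   # `patch` is a 224 x 224 x 3 uint8 array; `crop_params` holds the source and
#   # destination ranges that crop_segmentation() needs to warp 2D coordinates.

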
def crop_segmentation(org_coord, res, cropping_parameters):
    """Apply the same shift and rescaling as crop() to 2D pixel coordinates."""
    old_x, new_x, old_y, new_y, new_shape = cropping_parameters

    new_coord = np.zeros((org_coord.shape))
    new_coord[:, 0] = new_x[0] + (org_coord[:, 0] - old_x[0])
    new_coord[:, 1] = new_y[0] + (org_coord[:, 1] - old_y[0])

    new_coord[:, 0] = res[0] * (new_coord[:, 0] / new_shape[1])
    new_coord[:, 1] = res[1] * (new_coord[:, 1] / new_shape[0])

    return new_coord


def crop_for_hybrik(img, center, scale):
    inp_h, inp_w = (256, 256)
    trans = get_affine_transform(center, scale, 0, [inp_w, inp_h])
    new_img = cv2.warpAffine(img,
                             trans, (int(inp_w), int(inp_h)),
                             flags=cv2.INTER_LINEAR)
    return new_img


def get_affine_transform(center,
                         scale,
                         rot,
                         output_size,
                         shift=np.array([0, 0], dtype=np.float32),
                         inv=0):

    def get_dir(src_point, rot_rad):
        """Rotate the point by `rot_rad` radians."""
        sn, cs = np.sin(rot_rad), np.cos(rot_rad)

        src_result = [0, 0]
        src_result[0] = src_point[0] * cs - src_point[1] * sn
        src_result[1] = src_point[0] * sn + src_point[1] * cs

        return src_result

    def get_3rd_point(a, b):
        """Return a third point c such that (c - b) is perpendicular to (a - b)."""
        direct = a - b
        return b + np.array([-direct[1], direct[0]], dtype=np.float32)

    if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
        scale = np.array([scale, scale])

    scale_tmp = scale
    src_w = scale_tmp[0]
    dst_w = output_size[0]
    dst_h = output_size[1]

    rot_rad = np.pi * rot / 180
    src_dir = get_dir([0, src_w * -0.5], rot_rad)
    dst_dir = np.array([0, dst_w * -0.5], np.float32)

    # Three corresponding point pairs define the affine transform.
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center + scale_tmp * shift
    src[1, :] = center + src_dir + scale_tmp * shift
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir

    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])

    if inv:
        trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
    else:
        trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))

    return trans


def corner_align(ul, br):
    """Adjust `ul` so that the window spanned by `ul` and `br` is square."""

    if ul[1] - ul[0] != br[1] - br[0]:
        ul[1] = ul[0] + br[1] - br[0]

    return ul, br


def uncrop(img, center, scale, orig_shape):
    """'Undo' the image cropping/resizing.
    This function is used when evaluating mask/part segmentation.
    """

    res = img.shape[:2]

    # Upper-left point of the crop window in the original image.
    ul = np.array(transform([0, 0], center, scale, res, invert=1))
    # Bottom-right point of the crop window in the original image.
    br = np.array(transform(res, center, scale, res, invert=1))

    # Force the crop window to be square before pasting back.
    ul, br = corner_align(ul, br)

    crop_shape = [br[1] - ul[1], br[0] - ul[0]]
    new_img = np.zeros(orig_shape, dtype=np.uint8)

    # Range to fill in the new array.
    new_x = max(0, -ul[0]), min(br[0], orig_shape[1]) - ul[0]
    new_y = max(0, -ul[1]), min(br[1], orig_shape[0]) - ul[1]

    # Range to sample from the original image.
    old_x = max(0, ul[0]), min(orig_shape[1], br[0])
    old_y = max(0, ul[1]), min(orig_shape[0], br[1])

    img = np.array(Image.fromarray(img.astype(np.uint8)).resize(crop_shape))

    new_img[old_y[0]:old_y[1], old_x[0]:old_x[1]] = img[new_y[0]:new_y[1],
                                                        new_x[0]:new_x[1]]

    return new_img


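# Round-trip sketch (illustrative): paste a crop-space mask back into the
# original image frame, reusing the `center` and `scale` that produced the crop.
#
#   full_mask = uncrop(pred_mask_crop, center, scale, orig_shape=(1080, 1920))
#   # `pred_mask_crop` is assumed to be a square uint8 mask predicted on the
#   # crop; `full_mask` has shape (1080, 1920) with the mask at its original
#   # location and zeros elsewhere.

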
def rot_aa(aa, rot):
    """Rotate axis angle parameters."""
    # In-plane rotation of the image: rotation about the z-axis by -rot degrees.
    R = np.array([[np.cos(np.deg2rad(-rot)), -np.sin(np.deg2rad(-rot)), 0],
                  [np.sin(np.deg2rad(-rot)),
                   np.cos(np.deg2rad(-rot)), 0], [0, 0, 1]])

    # Convert the axis-angle vector to a rotation matrix, compose it with R,
    # and convert back to axis-angle.
    per_rdg, _ = cv2.Rodrigues(aa)
    resrot, _ = cv2.Rodrigues(np.dot(R, per_rdg))
    aa = (resrot.T)[0]
    return aa


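# Worked example (sketch): rotating a zero pose (identity rotation) by
# rot = 90 degrees gives an axis-angle vector of roughly [0, 0, -pi/2],
# i.e. a -90 degree rotation about the z-axis:
#
#   rot_aa(np.array([0.0, 0.0, 0.0]), 90)   # -> approx. [0., 0., -1.5708]

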
def flip_img(img):
    """Flip rgb images or masks.
    Channels come last, e.g. (256, 256, 3).
    """
    img = np.fliplr(img)
    return img


def flip_kp(kp, is_smpl=False):
    """Flip keypoints."""
    if len(kp) == 24:
        if is_smpl:
            flipped_parts = constants.SMPL_JOINTS_FLIP_PERM
        else:
            flipped_parts = constants.J24_FLIP_PERM
    elif len(kp) == 49:
        if is_smpl:
            flipped_parts = constants.SMPL_J49_FLIP_PERM
        else:
            flipped_parts = constants.J49_FLIP_PERM
    kp = kp[flipped_parts]
    kp[:, 0] = -kp[:, 0]
    return kp


def flip_pose(pose):
    """Flip pose.
    The flipping is based on SMPL parameters.
    """
    flipped_parts = constants.SMPL_POSE_FLIP_PERM
    pose = pose[flipped_parts]

    # Negate the y- and z-components of every axis-angle vector.
    pose[1::3] = -pose[1::3]
    pose[2::3] = -pose[2::3]
    return pose


def normalize_2d_kp(kp_2d, crop_size=224, inv=False):
    """Map 2D keypoints from pixel coordinates in a `crop_size` crop to [-1, 1],
    or invert the mapping when `inv=True`.
    """
    if not inv:
        ratio = 1.0 / crop_size
        kp_2d = 2.0 * kp_2d * ratio - 1.0
    else:
        ratio = 1.0 / crop_size
        kp_2d = (kp_2d + 1.0) / (2 * ratio)

    return kp_2d


def generate_heatmap(joints, heatmap_size, sigma=1, joints_vis=None):
    '''
    :param joints: [num_joints, 3]
    :param joints_vis: [num_joints, 3]
    :return: target, target_weight (1: visible, 0: invisible)
    '''
    num_joints = joints.shape[0]
    device = joints.device
    cur_device = torch.device(device.type, device.index)
    if not hasattr(heatmap_size, '__len__'):
        # heatmap_size was given as a scalar -> assume a square heatmap.
        heatmap_size = [heatmap_size, heatmap_size]
    assert len(heatmap_size) == 2
    target_weight = np.ones((num_joints, 1), dtype=np.float32)
    if joints_vis is not None:
        target_weight[:, 0] = joints_vis[:, 0]
    target = torch.zeros((num_joints, heatmap_size[1], heatmap_size[0]),
                         dtype=torch.float32,
                         device=cur_device)

    tmp_size = sigma * 3

    for joint_id in range(num_joints):
        mu_x = int(joints[joint_id][0] * heatmap_size[0] + 0.5)
        mu_y = int(joints[joint_id][1] * heatmap_size[1] + 0.5)

        # Check that at least part of the Gaussian is in-bounds.
        ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
        br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
        if ul[0] >= heatmap_size[0] or ul[1] >= heatmap_size[1] \
                or br[0] < 0 or br[1] < 0:
            # If not, mark the joint as invisible and skip it.
            target_weight[joint_id] = 0
            continue

        # Generate the Gaussian; it is not normalized, so its peak value is 1.
        size = 2 * tmp_size + 1
        x = torch.arange(0, size, dtype=torch.float32, device=cur_device)
        y = x.unsqueeze(-1)
        x0 = y0 = size // 2

        g = torch.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2))

        # Usable Gaussian range.
        g_x = max(0, -ul[0]), min(br[0], heatmap_size[0]) - ul[0]
        g_y = max(0, -ul[1]), min(br[1], heatmap_size[1]) - ul[1]

        # Heatmap range.
        img_x = max(0, ul[0]), min(br[0], heatmap_size[0])
        img_y = max(0, ul[1]), min(br[1], heatmap_size[1])

        v = target_weight[joint_id]
        if v > 0.5:
            target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = \
                g[g_y[0]:g_y[1], g_x[0]:g_x[1]]

    return target, target_weight
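# Usage sketch (illustrative): joint coordinates are expected in normalized
# [0, 1] image space, since they are scaled by `heatmap_size` above; `joints`
# is assumed to live on a CUDA device (its device index is read above).
#
#   joints = torch.tensor([[0.5, 0.5, 1.0], [0.25, 0.75, 1.0]], device="cuda:0")
#   target, target_weight = generate_heatmap(joints, 56, sigma=2)
#   # target: (2, 56, 56) tensor with one Gaussian blob (peak value 1) per joint
#   # target_weight: (2, 1) array marking the joints that fall inside the heatmap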