import cv2
import numpy as np
import os
import trimesh
import argparse
import torch
import scipy.ndimage
from PIL import Image
from refine.mesh_refine import geo_refine
from refine.func import make_star_cameras_orthographic
from refine.render import NormalsRenderer, calc_vertex_normals
from pytorch3d.structures import Meshes
from sklearn.neighbors import KDTree
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry

# sam = sam_model_registry["vit_h"](checkpoint="./ckpt/sam_vit_h_4b8939.pth").cuda()
# generator = SamAutomaticMaskGenerator(
#     model=sam,
#     points_per_side=64,
#     pred_iou_thresh=0.80,
#     stability_score_thresh=0.92,
#     crop_n_layers=1,
#     crop_n_points_downscale_factor=2,
#     min_mask_region_area=100,
# )
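

# Presumably intended to make vertex-colored GLB exports display correctly in viewers:
# attach a default double-sided PBR material (white base color, metallic 0, roughness 1)
# to the first primitive.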
def fix_vert_color_glb(mesh_path):
    from pygltflib import GLTF2, Material, PbrMetallicRoughness
    obj1 = GLTF2().load(mesh_path)
    obj1.meshes[0].primitives[0].material = 0
    obj1.materials.append(Material(
        pbrMetallicRoughness=PbrMetallicRoughness(
            baseColorFactor=[1.0, 1.0, 1.0, 1.0],
            metallicFactor=0.,
            roughnessFactor=1.0,
        ),
        emissiveFactor=[0.0, 0.0, 0.0],
        doubleSided=True,
    ))
    obj1.save(mesh_path)
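

# Standard sRGB-to-linear conversion (IEC 61966-2-1): linear segment below 0.04045,
# gamma-2.4 power curve above. Expects and returns values in [0, 1].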
def srgb_to_linear(c_srgb):
    c_linear = np.where(c_srgb <= 0.04045, c_srgb / 12.92, ((c_srgb + 0.055) / 1.055) ** 2.4)
    return c_linear.clip(0, 1.)

from pytorch3d.renderer import TexturesUV

def save_py3dmesh_with_trimesh_fast(meshes: Meshes, save_glb_path, apply_sRGB_to_LinearRGB=True):
    # Convert from pytorch3d Meshes to numpy arrays
    vertices = meshes.verts_packed().cpu().float().numpy()
    triangles = meshes.faces_packed().cpu().long().numpy()

    # Check whether the mesh uses a UV texture
    if isinstance(meshes.textures, TexturesUV):
        # Extract UV coordinates and texture map
        verts_uvs = meshes.textures.verts_uvs_padded()[0].cpu().numpy()    # UV coordinates (N, 2)
        faces_uvs = meshes.textures.faces_uvs_padded()[0].cpu().numpy()    # UV face indices (M, 3)
        texture_map = meshes.textures.maps_padded()[0].cpu().numpy()       # texture map (H, W, 3 or 4)

        # Convert the texture map to a trimesh-compatible PIL image
        if apply_sRGB_to_LinearRGB:
            texture_map = srgb_to_linear(texture_map)
        texture_map = np.clip(texture_map, 0, 1)  # ensure values are in [0, 1]
        texture_image = Image.fromarray((texture_map * 255).astype(np.uint8))
        material = trimesh.visual.texture.SimpleMaterial(image=texture_image, diffuse=(255, 255, 255))

        # Create a trimesh.Trimesh object with UVs and texture
        mesh = trimesh.Trimesh(
            vertices=vertices,
            faces=triangles,
            visual=trimesh.visual.TextureVisuals(
                uv=verts_uvs,          # per-vertex UV coordinates
                image=texture_image,   # texture map
                material=material      # material carrying the texture
            )
        )
    else:
        # Fall back to vertex colors if TexturesUV is not used
        np_color = meshes.textures.verts_features_packed().cpu().float().numpy()
        if apply_sRGB_to_LinearRGB:
            np_color = srgb_to_linear(np_color)
        np_color = np.clip(np_color, 0, 1)
        mesh = trimesh.Trimesh(vertices=vertices, faces=triangles, vertex_colors=np_color)

    # Rotate 180 degrees about +Y (negate X and Z) when exporting to GLB
    if save_glb_path.endswith(".glb"):
        flipped = mesh.vertices.copy()
        flipped[:, [0, 2]] = -flipped[:, [0, 2]]
        mesh.vertices = flipped

    # Remove unreferenced vertices and save
    mesh.remove_unreferenced_vertices()
    mesh.export(save_glb_path)
    # if save_glb_path.endswith(".glb"):
    #     fix_vert_color_glb(save_glb_path)
    print(f"Saving to {save_glb_path}")
def calc_horizontal_offset(target_img, source_img):
    # Near-white pixels (RGB sum > 750) form the masks that get aligned
    target_mask = target_img.astype(np.float32).sum(axis=-1) > 750
    source_mask = source_img.astype(np.float32).sum(axis=-1) > 750
    best_overlap = -114514
    best_offset = 0
    for offset in range(-200, 200):
        offset_mask = np.roll(source_mask, offset, axis=1)
        overlap = (target_mask & offset_mask).sum()
        if overlap > best_overlap:
            best_overlap = overlap
            best_offset = offset
    return best_offset

def calc_horizontal_offset2(target_mask, source_img):
    # Same search as above, but the target mask is supplied directly (e.g. a render mask)
    source_mask = source_img.astype(np.float32).sum(axis=-1) > 750
    best_overlap = -114514
    best_offset = 0
    for offset in range(-200, 200):
        offset_mask = np.roll(source_mask, offset, axis=1)
        overlap = (target_mask & offset_mask).sum()
        if overlap > best_overlap:
            best_overlap = overlap
            best_offset = offset
    return best_offset
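

# Locate regions where two aligned front views disagree: threshold the per-pixel color
# (and optionally normal) difference, label connected components, keep mid-sized ones
# (1k-100k pixels), and return a per-pixel mask plus a filled bounding-box mask.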
def get_distract_mask(color_0, color_1, normal_0=None, normal_1=None, thres=0.25, ratio=0.50, outside_thres=0.10, outside_ratio=0.20):
    # NOTE: ratio, outside_thres and outside_ratio are accepted for interface
    # compatibility but are not used in the current implementation.
    distract_area = np.abs(color_0 - color_1).sum(axis=-1) > thres
    if normal_0 is not None and normal_1 is not None:
        distract_area |= np.abs(normal_0 - normal_1).sum(axis=-1) > thres
    labeled_array, num_features = scipy.ndimage.label(distract_area)

    results = []
    random_sampled_points = []
    for i in range(num_features + 1):
        area = np.sum(labeled_array == i)
        if 1000 < area < 100000:
            results.append((i, area))
            # randomly sample a point inside the area
            points = np.argwhere(labeled_array == i)
            random_sampled_points.append(points[np.random.randint(0, points.shape[0])])

    results = sorted(results, key=lambda x: x[1], reverse=True)  # [1:]
    distract_mask = np.zeros_like(distract_area)
    distract_bbox = np.zeros_like(distract_area)
    for i, _ in results:
        distract_mask |= labeled_array == i
        bbox = np.argwhere(labeled_array == i)
        min_x, min_y = bbox.min(axis=0)
        max_x, max_y = bbox.max(axis=0)
        distract_bbox[min_x:max_x, min_y:max_y] = 1

    # callers below only use the first two return values
    return distract_mask, distract_bbox, results, random_sampled_points
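

# Entry point: for each case, build the orthographic cameras, render the coarse SLRM mesh
# to get alignment masks, horizontally align the generated color/normal views, and run
# geo_refine per decomposition level (2, 1, 0), anchoring each level to the simplified
# geometry (fixed_v / fixed_f) accumulated from previously refined levels.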
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_mv_dir', type=str, default='result/multiview')
    parser.add_argument('--input_obj_dir', type=str, default='result/slrm')
    parser.add_argument('--output_dir', type=str, default='result/refined')
    parser.add_argument('--outside_ratio', type=float, default=0.20)
    parser.add_argument('--no_decompose', action='store_true')
    args = parser.parse_args()

    import time
    start_time = time.time()

    for test_idx in os.listdir(args.input_mv_dir):
        mv_root_dir = os.path.join(args.input_mv_dir, test_idx)
        obj_dir = os.path.join(args.input_obj_dir, test_idx)

        fixed_v, fixed_f = None, None
        flow_vert, flow_vector = None, None
        last_colors, last_normals = None, None
        last_front_color, last_front_normal = None, None
        distract_mask = None
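
        # Build orthographic "star" cameras: 8 views at r=1.2, then reorder/select the six
        # that are assumed to correspond to the generated multiview images (the exact
        # index-to-view mapping comes from make_star_cameras_orthographic).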
        mv, proj = make_star_cameras_orthographic(8, 1, r=1.2)
        mv = mv[[4, 3, 2, 0, 6, 5]]
        renderer = NormalsRenderer(mv, proj, (1024, 1024))

        if not args.no_decompose:
            for name_idx, level in zip([3, 1, 2], [2, 1, 0]):
                mesh = trimesh.load(obj_dir + f'_{name_idx}.obj')
                new_mesh = mesh.split(only_watertight=False)
                new_mesh = [j for j in new_mesh if len(j.vertices) >= 300]  # drop tiny components
                mesh = trimesh.Scene(new_mesh).dump(concatenate=True)
                mesh_v, mesh_f = mesh.vertices, mesh.faces

                if last_colors is None:
                    # Render the coarse mesh once to obtain per-view background masks for alignment
                    images = renderer.render(
                        torch.tensor(mesh_v, device='cuda').float(),
                        torch.ones_like(torch.from_numpy(mesh_v), device='cuda').float(),
                        torch.tensor(mesh_f, device='cuda'),
                    )
                    mask = (images[..., 3] < 0.9).cpu().numpy()

                colors, normals = [], []
                for i in range(6):
                    color_path = os.path.join(mv_root_dir, f'level{level}', f'color_{i}.png')
                    normal_path = os.path.join(mv_root_dir, f'level{level}', f'normal_{i}.png')
                    color = cv2.imread(color_path)
                    normal = cv2.imread(normal_path)
                    color = color[..., ::-1]    # BGR -> RGB
                    normal = normal[..., ::-1]  # BGR -> RGB
                    if last_colors is not None:
                        offset = calc_horizontal_offset(np.array(last_colors[i]), color)
                        # print('offset', i, offset)
                    else:
                        offset = calc_horizontal_offset2(mask[i], color)
                        # print('init offset', i, offset)
                    if offset != 0:
                        color = np.roll(color, offset, axis=1)
                        normal = np.roll(normal, offset, axis=1)
                    color = Image.fromarray(color)
                    normal = Image.fromarray(normal)
                    colors.append(color)
                    normals.append(normal)

                if last_front_color is not None and level == 0:
                    distract_mask, distract_bbox, _, _ = get_distract_mask(last_front_color, np.array(colors[0]).astype(np.float32) / 255.0, outside_ratio=args.outside_ratio)
                    cv2.imwrite(f'{args.output_dir}/{test_idx}/distract_mask.png', distract_mask.astype(np.uint8) * 255)
                else:
                    distract_mask = None
                    distract_bbox = None

                last_front_color = np.array(colors[0]).astype(np.float32) / 255.0
                last_front_normal = np.array(normals[0]).astype(np.float32) / 255.0

                if last_colors is None:
                    from copy import deepcopy
                    last_colors, last_normals = deepcopy(colors), deepcopy(normals)
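
                # Pull the current level's vertices toward the previously refined geometry:
                # for each vertex, find its nearest anchor in fixed_v, move along the anchor
                # normal by the (clamped) signed distance, and smooth the displacement over
                # the 25 nearest mesh vertices with distance-based weights.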
                # mesh flow: weight by nearest vertices
                if fixed_v is not None and fixed_f is not None and level == 1:
                    t = trimesh.Trimesh(vertices=mesh_v, faces=mesh_f)
                    fixed_v_cpu = fixed_v.cpu().numpy()
                    kdtree_anchor = KDTree(fixed_v_cpu)
                    kdtree_mesh_v = KDTree(mesh_v)
                    _, idx_anchor = kdtree_anchor.query(mesh_v, k=1)
                    _, idx_mesh_v = kdtree_mesh_v.query(mesh_v, k=25)
                    idx_anchor = idx_anchor.squeeze()
                    neighbors = torch.tensor(mesh_v).cuda()[idx_mesh_v]  # V, 25, 3
                    # distances to neighbors: neighbors [V, 25, 3] vs. mesh_v [V, 3] -> [V, 25]
                    neighbor_dists = torch.norm(neighbors - torch.tensor(mesh_v).cuda()[:, None], dim=-1)
                    neighbor_dists[neighbor_dists > 0.06] = 114514.  # effectively zero weight beyond 0.06
                    neighbor_weights = torch.exp(-neighbor_dists * 1.)
                    neighbor_weights = neighbor_weights / neighbor_weights.sum(dim=1, keepdim=True)
                    anchors = fixed_v[idx_anchor]  # V, 3
                    anchor_normals = calc_vertex_normals(fixed_v, fixed_f)[idx_anchor]  # V, 3
                    dis_anchor = torch.clamp(((anchors - torch.tensor(mesh_v).cuda()) * anchor_normals).sum(-1), min=0) + 0.01
                    vec_anchor = dis_anchor[:, None] * anchor_normals  # V, 3
                    vec_anchor = vec_anchor[idx_mesh_v]  # V, 25, 3
                    weighted_vec_anchor = (vec_anchor * neighbor_weights[:, :, None]).sum(1)  # V, 3
                    mesh_v += weighted_vec_anchor.cpu().numpy()
                    t = trimesh.Trimesh(vertices=mesh_v, faces=mesh_f)

                mesh_v = torch.tensor(mesh_v, device='cuda', dtype=torch.float32)
                mesh_f = torch.tensor(mesh_f, device='cuda')

                new_mesh, simp_v, simp_f = geo_refine(mesh_v, mesh_f, colors, normals, fixed_v=fixed_v, fixed_f=fixed_f, distract_mask=distract_mask, distract_bbox=distract_bbox)
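
                # geo_refine returns the refined mesh plus simplified geometry (simp_v, simp_f);
                # the simplified copy is accumulated into fixed_v / fixed_f below and serves as
                # the anchor geometry for the remaining levels.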
                # mesh flow: weight by nearest vertices (same scheme, applied to the refined mesh)
                try:
                    if fixed_v is not None and fixed_f is not None and level != 0:
                        new_mesh_v = new_mesh.vertices.copy()
                        fixed_v_cpu = fixed_v.cpu().numpy()
                        kdtree_anchor = KDTree(fixed_v_cpu)
                        kdtree_mesh_v = KDTree(new_mesh_v)
                        _, idx_anchor = kdtree_anchor.query(new_mesh_v, k=1)
                        _, idx_mesh_v = kdtree_mesh_v.query(new_mesh_v, k=25)
                        idx_anchor = idx_anchor.squeeze()
                        neighbors = torch.tensor(new_mesh_v).cuda()[idx_mesh_v]  # V, 25, 3
                        # distances to neighbors: neighbors [V, 25, 3] vs. new_mesh_v [V, 3] -> [V, 25]
                        neighbor_dists = torch.norm(neighbors - torch.tensor(new_mesh_v).cuda()[:, None], dim=-1)
                        neighbor_dists[neighbor_dists > 0.06] = 114514.  # effectively zero weight beyond 0.06
                        neighbor_weights = torch.exp(-neighbor_dists * 1.)
                        neighbor_weights = neighbor_weights / neighbor_weights.sum(dim=1, keepdim=True)
                        anchors = fixed_v[idx_anchor]  # V, 3
                        anchor_normals = calc_vertex_normals(fixed_v, fixed_f)[idx_anchor]  # V, 3
                        dis_anchor = torch.clamp(((anchors - torch.tensor(new_mesh_v).cuda()) * anchor_normals).sum(-1), min=0) + 0.01
                        vec_anchor = dis_anchor[:, None] * anchor_normals  # V, 3
                        vec_anchor = vec_anchor[idx_mesh_v]  # V, 25, 3
                        weighted_vec_anchor = (vec_anchor * neighbor_weights[:, :, None]).sum(1)  # V, 3
                        new_mesh_v += weighted_vec_anchor.cpu().numpy()
                        new_mesh.vertices = new_mesh_v
                except Exception:
                    pass

                os.makedirs(f'{args.output_dir}/{test_idx}', exist_ok=True)
                new_mesh.export(f'{args.output_dir}/{test_idx}/out_{level}.glb')

                if fixed_v is None:
                    fixed_v, fixed_f = simp_v, simp_f
                else:
                    fixed_f = torch.cat([fixed_f, simp_f + fixed_v.shape[0]], dim=0)
                    fixed_v = torch.cat([fixed_v, simp_v], dim=0)
                # input("Press Enter to continue...")

            print('finish', time.time() - start_time)
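
        # With --no_decompose, skip the per-level decomposition and anchoring: refine the
        # level-0 mesh in a single geo_refine pass and export it via
        # save_py3dmesh_with_trimesh_fast.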
        else:
            mesh = trimesh.load(obj_dir + '_0.obj')
            mesh_v, mesh_f = mesh.vertices, mesh.faces

            images = renderer.render(
                torch.tensor(mesh_v, device='cuda').float(),
                torch.ones_like(torch.from_numpy(mesh_v), device='cuda').float(),
                torch.tensor(mesh_f, device='cuda'),
            )
            mask = (images[..., 3] < 0.9).cpu().numpy()

            colors, normals = [], []
            for i in range(6):
                color_path = os.path.join(mv_root_dir, 'level0', f'color_{i}.png')
                normal_path = os.path.join(mv_root_dir, 'level0', f'normal_{i}.png')
                color = cv2.imread(color_path)
                normal = cv2.imread(normal_path)
                color = color[..., ::-1]
                normal = normal[..., ::-1]
                offset = calc_horizontal_offset2(mask[i], color)
                if offset != 0:
                    color = np.roll(color, offset, axis=1)
                    normal = np.roll(normal, offset, axis=1)
                color = Image.fromarray(color)
                normal = Image.fromarray(normal)
                colors.append(color)
                normals.append(normal)

            mesh_v = torch.tensor(mesh_v, device='cuda', dtype=torch.float32)
            mesh_f = torch.tensor(mesh_f, device='cuda')

            new_mesh, _, _ = geo_refine(mesh_v, mesh_f, colors, normals, no_decompose=True, expansion_weight=0.)

            os.makedirs(f'{args.output_dir}/{test_idx}', exist_ok=True)
            save_py3dmesh_with_trimesh_fast(new_mesh, f'{args.output_dir}/{test_idx}/out_nodecomp.glb', apply_sRGB_to_LinearRGB=False)