import argparse
import os
import json
from math import radians

import bpy
import numpy as np

COLOR_SPACES = ["display", "linear"]
DEVICES = ["cpu", "cuda", "optix"]


def listify_matrix(matrix):
    matrix_list = []
    for row in matrix:
        matrix_list.append(list(row))
    return matrix_list


def parent_obj_to_camera(b_camera, origin):
    b_empty = bpy.data.objects.new("Empty", None)
    b_empty.location = origin
    b_camera.parent = b_empty

    scn = bpy.context.scene
    scn.collection.objects.link(b_empty)
    bpy.context.view_layer.objects.active = b_empty
    return b_empty


def main(args):
    bpy.ops.wm.open_mainfile(filepath=args.blend_path)

    scene = bpy.data.scenes["Scene"]
    scene.render.engine = "CYCLES"
    scene.render.use_persistent_data = True
    scene.cycles.samples = 256
    bpy.context.scene.unit_settings.scale_length = 0.01

    # Select the compute device for Cycles.
    if args.device == "cpu":
        bpy.context.preferences.addons["cycles"].preferences.compute_device_type = "NONE"
        bpy.context.scene.cycles.device = "CPU"
    elif args.device == "cuda":
        bpy.context.preferences.addons["cycles"].preferences.compute_device_type = "CUDA"
        bpy.context.scene.cycles.device = "GPU"
    elif args.device == "optix":
        bpy.context.preferences.addons["cycles"].preferences.compute_device_type = "OPTIX"
        bpy.context.scene.cycles.device = "GPU"
    bpy.context.preferences.addons["cycles"].preferences.get_devices()

    # Set up compositor nodes for the optional depth and normal passes.
    scene.view_layers[0].use_pass_combined = True
    scene.use_nodes = True
    tree = scene.node_tree

    if args.depth:
        scene.view_layers[0].use_pass_z = True
        combine_color = tree.nodes.new("CompositorNodeCombineColor")
        depth_output = tree.nodes.new("CompositorNodeOutputFile")
    if args.normal:
        scene.view_layers[0].use_pass_normal = True
        normal_output = tree.nodes.new("CompositorNodeOutputFile")
    if args.depth or args.normal:
        render_layers = tree.nodes.new("CompositorNodeRLayers")

    scene.render.filepath = args.renders_path
    scene.render.use_file_extension = True
    scene.render.use_overwrite = True
    scene.render.image_settings.color_mode = "RGBA"

    if args.color_space == "display":
        scene.render.image_settings.file_format = "PNG"
        scene.render.image_settings.color_depth = "8"
        scene.render.image_settings.color_management = "FOLLOW_SCENE"
    elif args.color_space == "linear":
        scene.render.image_settings.file_format = "OPEN_EXR"
        scene.render.image_settings.color_depth = "32"

    if args.depth:
        depth_output.base_path = os.path.join(args.renders_path, "depth")
        depth_output.file_slots[0].use_node_format = True
        scene.frame_set(0)

        depth_output.format.file_format = "OPEN_EXR"
        depth_output.format.color_mode = "RGB"
        depth_output.format.color_depth = "32"
        depth_output.format.exr_codec = "NONE"

        # Store the raw depth in the red channel of the EXR.
        links = tree.links
        combine_color.mode = "RGB"
        links.new(render_layers.outputs["Depth"], combine_color.inputs["Red"])
        combine_color.inputs["Green"].default_value = 0
        combine_color.inputs["Blue"].default_value = 0
        combine_color.inputs["Alpha"].default_value = 1
        links.new(combine_color.outputs["Image"], depth_output.inputs["Image"])

    if args.normal:
        normal_output.base_path = os.path.join(args.renders_path, "normal")
        normal_output.file_slots[0].use_node_format = True
        scene.frame_set(0)

        normal_output.format.file_format = "OPEN_EXR"
        normal_output.format.color_mode = "RGB"
        normal_output.format.color_depth = "32"
        normal_output.format.exr_codec = "NONE"

        links = tree.links
        links.new(render_layers.outputs["Normal"], normal_output.inputs["Image"])

    scene.render.dither_intensity = 0.0
    scene.render.film_transparent = True
    scene.render.resolution_percentage = 100
    scene.render.resolution_x = args.resolution[0]
    scene.render.resolution_y = args.resolution[1]

    # Camera orbits around an empty object placed at the scene origin offset.
    cam = bpy.data.objects["Camera"]
    cam.location = (4.0, -214.736, 120.0)
    cam.rotation_mode = "XYZ"

    cam_constraint = cam.constraints.new(type="TRACK_TO")
    cam_constraint.track_axis = "TRACK_NEGATIVE_Z"
    cam_constraint.up_axis = "UP_Y"
    b_empty = parent_obj_to_camera(cam, (0, 0, 100.0))
    cam_constraint.target = b_empty

    args.renders_path = os.path.normpath(args.renders_path)
    folder_name = os.path.basename(args.renders_path)
    renders_parent_path = os.path.dirname(args.renders_path)
    transforms_path = os.path.join(renders_parent_path, f"transforms_{folder_name}.json")

    stepsize = 360.0 / args.num_views
    out_data = {
        "camera_angle_x": cam.data.angle_x,
        "frames": []
    }

    for i in range(args.num_views):
        if args.random_views:
            if args.upper_views:
                # Sample a random view from the upper hemisphere:
                # restrict the x-axis (pitch) rotation so the camera
                # does not point too far downwards.
                pitch = radians(np.random.uniform(-20.0, 30.0))  # pitch limited to [-20°, 30°]
                yaw = radians(np.random.uniform(0, 360))  # random yaw angle
                b_empty.rotation_euler = (pitch, 0, yaw)
            else:
                # Sample a fully random view.
                b_empty.rotation_euler = (
                    radians(np.random.uniform(0, 180)),
                    0,
                    radians(np.random.uniform(0, 360))
                )
        else:
            # Sample views at equal angular intervals around the scene.
            b_empty.rotation_euler[2] = radians(i * stepsize)

        scene.render.filepath = os.path.join(args.renders_path, f"r_{i}")
        if args.depth:
            depth_output.file_slots[0].path = f"r_{i}"
        if args.normal:
            normal_output.file_slots[0].path = f"r_{i}"

        bpy.ops.render.render(write_still=True)

        # The File Output node appends the frame number; strip it from the file names.
        if args.depth:
            os.rename(os.path.join(depth_output.base_path, f"r_{i}0000.exr"),
                      os.path.join(depth_output.base_path, f"r_{i}.exr"))
        if args.normal:
            os.rename(os.path.join(normal_output.base_path, f"r_{i}0000.exr"),
                      os.path.join(normal_output.base_path, f"r_{i}.exr"))

        frame_data = {
            "file_path": os.path.join(".", os.path.relpath(scene.render.filepath, start=renders_parent_path)),
            "rotation": radians(i * stepsize),
            "transform_matrix": listify_matrix(cam.matrix_world)
        }
        out_data["frames"].append(frame_data)

    with open(transforms_path, "w") as out_file:
        json.dump(out_data, out_file, indent=4)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Script for rendering novel views of synthetic Blender scenes.")
    parser.add_argument("blend_path", type=str, help="Path to the blend-file of the synthetic Blender scene.")
    parser.add_argument("renders_path", type=str, help="Desired path to the novel view renders.")
    parser.add_argument("num_views", type=int, help="Number of novel view renders.")
    parser.add_argument("resolution", type=int, nargs=2, default=[1080, 720],
                        help="Image resolution of the novel view renders.")
    parser.add_argument("--color_space", type=str, choices=COLOR_SPACES, default="display",
                        help="Color space of the output novel view images.")
    parser.add_argument("--device", type=str, choices=DEVICES, default="cuda",
                        help="Compute device type for rendering.")
    parser.add_argument("--random_views", action="store_true", help="Randomly sample novel views.")
    parser.add_argument("--upper_views", action="store_true",
                        help="Only sample novel views from the upper hemisphere.")
    parser.add_argument("--depth", action="store_true", help="Render depth maps too.")
    parser.add_argument("--normal", action="store_true", help="Render normal maps too.")
    args = parser.parse_args()
    main(args)
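
# Example invocation (a sketch, not part of the script itself): this assumes the
# `bpy` module is importable by the interpreter running the script, e.g. installed
# via pip, so the script can be launched directly with Python. The script name and
# paths below are illustrative only.
#
#   python render_novel_views.py scene.blend renders/train 100 1080 720 \
#       --device cuda --color_space display --random_views --upper_views --depth --normal
#
# With these flags the script writes r_0.png ... r_99.png (plus depth/ and normal/
# EXR subfolders) under renders/train, and a transforms_train.json file next to
# the renders folder.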