Upload 206 files
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- indoor_motor/bpy_render_views.py +193 -0
- indoor_motor/get_trajectory_in_blender.py +67 -0
- indoor_motor/get_trajectory_in_blender_quan.py +68 -0
- indoor_motor/output_in_blender_quan.csv +0 -0
- indoor_motor/views/test/r_1.png +3 -0
- indoor_motor/views/test/r_100.png +3 -0
- indoor_motor/views/test/r_101.png +3 -0
- indoor_motor/views/test/r_103.png +3 -0
- indoor_motor/views/test/r_104.png +3 -0
- indoor_motor/views/test/r_105.png +3 -0
- indoor_motor/views/test/r_106.png +3 -0
- indoor_motor/views/test/r_108.png +3 -0
- indoor_motor/views/test/r_109.png +3 -0
- indoor_motor/views/test/r_111.png +3 -0
- indoor_motor/views/test/r_112.png +3 -0
- indoor_motor/views/test/r_113.png +3 -0
- indoor_motor/views/test/r_114.png +3 -0
- indoor_motor/views/test/r_116.png +3 -0
- indoor_motor/views/test/r_117.png +3 -0
- indoor_motor/views/test/r_118.png +3 -0
- indoor_motor/views/test/r_120.png +3 -0
- indoor_motor/views/test/r_121.png +3 -0
- indoor_motor/views/test/r_124.png +3 -0
- indoor_motor/views/test/r_125.png +3 -0
- indoor_motor/views/test/r_127.png +3 -0
- indoor_motor/views/test/r_128.png +3 -0
- indoor_motor/views/test/r_131.png +3 -0
- indoor_motor/views/test/r_132.png +3 -0
- indoor_motor/views/test/r_134.png +3 -0
- indoor_motor/views/test/r_142.png +3 -0
- indoor_motor/views/test/r_143.png +3 -0
- indoor_motor/views/test/r_144.png +3 -0
- indoor_motor/views/test/r_146.png +3 -0
- indoor_motor/views/test/r_148.png +3 -0
- indoor_motor/views/test/r_152.png +3 -0
- indoor_motor/views/test/r_157.png +3 -0
- indoor_motor/views/test/r_158.png +3 -0
- indoor_motor/views/test/r_159.png +3 -0
- indoor_motor/views/test/r_160.png +3 -0
- indoor_motor/views/test/r_162.png +3 -0
- indoor_motor/views/test/r_163.png +3 -0
- indoor_motor/views/test/r_165.png +3 -0
- indoor_motor/views/test/r_166.png +3 -0
- indoor_motor/views/test/r_168.png +3 -0
- indoor_motor/views/test/r_169.png +3 -0
- indoor_motor/views/test/r_174.png +3 -0
- indoor_motor/views/test/r_175.png +3 -0
- indoor_motor/views/test/r_177.png +3 -0
- indoor_motor/views/test/r_178.png +3 -0
- indoor_motor/views/test/r_179.png +3 -0
indoor_motor/bpy_render_views.py
ADDED
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import argparse
|
2 |
+
import os
|
3 |
+
import json
|
4 |
+
from math import radians
|
5 |
+
import bpy
|
6 |
+
import numpy as np
|
7 |
+
|
8 |
+
COLOR_SPACES = ["display", "linear"]
|
9 |
+
DEVICES = ["cpu", "cuda", "optix"]
|
10 |
+
|
11 |
+
def listify_matrix(matrix):
    """Convert a Blender ``Matrix`` (any iterable of rows) into a plain
    nested list so it can be JSON-serialized.

    Returns a ``list`` of ``list``s with the same row/column layout.
    """
    # Comprehension replaces the original manual append loop (same result).
    return [list(row) for row in matrix]
|
16 |
+
|
17 |
+
def parent_obj_to_camera(b_camera, origin):
    """Create an empty object at *origin*, make it the camera's parent,
    link it into the scene collection, set it active, and return it.

    The empty serves as the pivot the caller rotates to orbit the camera.
    """
    pivot = bpy.data.objects.new("Empty", None)
    pivot.location = origin
    b_camera.parent = pivot

    current_scene = bpy.context.scene
    current_scene.collection.objects.link(pivot)
    bpy.context.view_layer.objects.active = pivot

    return pivot
|
27 |
+
|
28 |
+
def main(args):
    """Render novel views of a synthetic Blender scene with Cycles.

    Opens the .blend file, configures the compute device and output
    settings, orbits the camera around an empty pivot, renders
    ``args.num_views`` frames (optionally with depth and normal passes as
    32-bit EXRs via compositor file-output nodes), and writes a NeRF-style
    ``transforms_<folder>.json`` with each frame's camera-to-world matrix.
    """
    bpy.ops.wm.open_mainfile(filepath=args.blend_path)

    scene = bpy.data.scenes["Scene"]
    scene.render.engine = "CYCLES"
    scene.render.use_persistent_data = True  # reuse scene data across frames
    scene.cycles.samples = 256
    # NOTE(review): assumes the scene is modeled at 1 unit = 1 cm — confirm
    # against the .blend files this script is run on.
    bpy.context.scene.unit_settings.scale_length = 0.01
    # Select the Cycles compute backend.
    if args.device == "cpu":
        bpy.context.preferences.addons["cycles"].preferences.compute_device_type = "NONE"
        bpy.context.scene.cycles.device = "CPU"
    elif args.device == "cuda":
        bpy.context.preferences.addons["cycles"].preferences.compute_device_type = "CUDA"
        bpy.context.scene.cycles.device = "GPU"
    elif args.device == "optix":
        bpy.context.preferences.addons["cycles"].preferences.compute_device_type = "OPTIX"
        bpy.context.scene.cycles.device = "GPU"
    bpy.context.preferences.addons["cycles"].preferences.get_devices()

    scene.view_layers[0].use_pass_combined = True
    scene.use_nodes = True
    tree = scene.node_tree

    # Create compositor nodes for the optional auxiliary passes.
    if args.depth:
        scene.view_layers[0].use_pass_z = True
        combine_color = tree.nodes.new("CompositorNodeCombineColor")
        depth_output = tree.nodes.new("CompositorNodeOutputFile")
    if args.normal:
        scene.view_layers[0].use_pass_normal = True
        normal_output = tree.nodes.new("CompositorNodeOutputFile")
    if args.depth or args.normal:
        render_layers = tree.nodes.new("CompositorNodeRLayers")

    scene.render.filepath = args.renders_path
    scene.render.use_file_extension = True
    scene.render.use_overwrite = True
    scene.render.image_settings.color_mode = "RGBA"

    if args.color_space == "display":
        scene.render.image_settings.file_format = "PNG"
        scene.render.image_settings.color_depth = "8"
        scene.render.image_settings.color_management = "FOLLOW_SCENE"
    elif args.color_space == "linear":
        scene.render.image_settings.file_format = "OPEN_EXR"
        scene.render.image_settings.color_depth = "32"

    if args.depth:
        depth_output.base_path = os.path.join(args.renders_path, "depth")
        depth_output.file_slots[0].use_node_format = True
        scene.frame_set(0)

        depth_output.format.file_format = "OPEN_EXR"
        depth_output.format.color_mode = "RGB"
        depth_output.format.color_depth = "32"
        depth_output.format.exr_codec = "NONE"

        links = tree.links
        # Pack the scalar Z pass into the red channel of an RGB image.
        combine_color.mode = "RGB"
        links.new(render_layers.outputs["Depth"], combine_color.inputs["Red"])
        combine_color.inputs["Green"].default_value = 0
        combine_color.inputs["Blue"].default_value = 0
        combine_color.inputs["Alpha"].default_value = 1

        links.new(combine_color.outputs["Image"], depth_output.inputs["Image"])

    if args.normal:
        normal_output.base_path = os.path.join(args.renders_path, "normal")
        normal_output.file_slots[0].use_node_format = True
        scene.frame_set(0)

        normal_output.format.file_format = "OPEN_EXR"
        normal_output.format.color_mode = "RGB"
        normal_output.format.color_depth = "32"
        normal_output.format.exr_codec = "NONE"

        links = tree.links
        # BUGFIX: the original assigned ``combine_color.mode`` here, but that
        # node is only created under --depth, so running with --normal alone
        # raised NameError.  The normal pass is wired directly and never uses
        # the combine node.
        links.new(render_layers.outputs["Normal"], normal_output.inputs["Image"])

    scene.render.dither_intensity = 0.0
    scene.render.film_transparent = True  # transparent background for RGBA
    scene.render.resolution_percentage = 100
    scene.render.resolution_x = args.resolution[0]
    scene.render.resolution_y = args.resolution[1]

    # Camera rig: the camera tracks an empty pivot and orbits by rotating it.
    cam = bpy.data.objects["Camera"]
    cam.location = (0.0, -3.5 , 0.5)
    cam.rotation_mode = "XYZ"
    cam_constraint = cam.constraints.new(type="TRACK_TO")
    cam_constraint.track_axis = "TRACK_NEGATIVE_Z"
    cam_constraint.up_axis = "UP_Y"
    b_empty = parent_obj_to_camera(cam, (0, -4.0, 1.0))
    cam_constraint.target = b_empty

    # transforms_<folder>.json is written next to the renders folder.
    args.renders_path = os.path.normpath(args.renders_path)
    folder_name = os.path.basename(args.renders_path)
    renders_parent_path = os.path.dirname(args.renders_path)
    transforms_path = os.path.join(renders_parent_path, f"transforms_{folder_name}.json")

    stepsize = 360.0 / args.num_views
    out_data = {
        "camera_angle_x": cam.data.angle_x,
        "frames": []
    }

    for i in range(args.num_views):
        if args.random_views:
            if args.upper_views:
                # Random views near the horizon: pitch limited to
                # [-15, 15] degrees, yaw uniform over the full circle.
                # (The original comment claimed 0-90 degrees, which did not
                # match the code.)
                pitch = radians(np.random.uniform(-15.0 , 15.0))
                yaw = radians(np.random.uniform(0, 360))
                b_empty.rotation_euler = (pitch, 0, yaw)
            else:
                # Fully random view direction.
                b_empty.rotation_euler = (
                    radians(np.random.uniform(0, 180)),
                    0,
                    radians(np.random.uniform(0, 360))
                )
        else:
            # Evenly spaced views around the vertical axis.
            b_empty.rotation_euler[2] = radians(i * stepsize)

        scene.render.filepath = os.path.join(args.renders_path, f"r_{i}")
        if args.depth:
            depth_output.file_slots[0].path = f"r_{i}"
        if args.normal:
            normal_output.file_slots[0].path = f"r_{i}"
        bpy.ops.render.render(write_still=True)

        # File-output nodes append the frame number; strip the "0000" suffix.
        if args.depth:
            os.rename(os.path.join(depth_output.base_path, f"r_{i}0000.exr"),
                      os.path.join(depth_output.base_path, f"r_{i}.exr"))
        if args.normal:
            os.rename(os.path.join(normal_output.base_path, f"r_{i}0000.exr"),
                      os.path.join(normal_output.base_path, f"r_{i}.exr"))

        frame_data = {
            "file_path": os.path.join(".", os.path.relpath(scene.render.filepath, start=renders_parent_path)),
            "rotation": radians(i * stepsize),
            "transform_matrix": listify_matrix(cam.matrix_world)
        }
        out_data["frames"].append(frame_data)

    with open(transforms_path, "w") as out_file:
        json.dump(out_data, out_file, indent=4)
|
175 |
+
|
176 |
+
if __name__ == "__main__":
    # CLI entry point: three positional path/count arguments, a two-value
    # resolution, and optional render flags.
    parser = argparse.ArgumentParser(description="Script for rendering novel views of synthetic Blender scenes.")
    parser.add_argument("blend_path", type=str, help="Path to the blend-file of the synthetic Blender scene.")
    parser.add_argument("renders_path", type=str, help="Desired path to the novel view renders.")
    parser.add_argument("num_views", type=int, help="Number of novel view renders.")
    # NOTE(review): for a positional argument with nargs=2 the ``default`` is
    # never used — both resolution values must always be supplied.
    parser.add_argument("resolution", type=int, nargs=2, default=[1080, 720], help="Image resolution of the novel view renders.")
    parser.add_argument("--color_space", type=str, choices=COLOR_SPACES, default="display", help="Color space of the output novel view images.")
    parser.add_argument("--device", type=str, choices=DEVICES, default="cuda", help="Compute device type for rendering.")
    parser.add_argument("--random_views", action="store_true", help="Randomly sample novel views.")
    parser.add_argument("--upper_views", action="store_true", help="Only sample novel views from the upper hemisphere.")
    parser.add_argument("--depth", action="store_true", help="Render depth maps too.")
    parser.add_argument("--normal", action="store_true", help="Render normal maps too.")
    args = parser.parse_args()

    main(args)
# Example invocations:
# python bpy_render_views.py /home/falcary/workstation/blender_env_indoors_dataset/outputs/Capsule/Capsule_Trajetory/C ./test/ 200 800 800 --color_space display --device cuda --random_views --depth --normal
# python bpy_render_views.py /home/falcary/workstation/blender_env_indoors_dataset/indoors_car/car_obj/Lighting Scene-1_Trajectory.blend ./val/ 10 800 800 --color_space display --device optix --depth --normal
|
193 |
+
|
indoor_motor/get_trajectory_in_blender.py
ADDED
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import bpy
import csv
from math import pi

# Output CSV path for the exported camera trajectory.
export_file = "/home/falcary/workstation/blender_env_indoors_dataset/outputs/Capsule/output_in_blender.csv"

# Time between consecutive frames, in seconds.
# NOTE(review): the original comment claimed 24 fps, but this value is 10 fps
# — confirm which playback rate the downstream consumer expects.
frame_duration = 1 / 10

# Active camera of the current scene.
camera = bpy.context.scene.camera

# Proceed only if there is an active camera object of type 'CAMERA'.
if camera is not None and camera.type == 'CAMERA':
    # Frame range of the current scene.
    start_frame = bpy.context.scene.frame_start
    end_frame = bpy.context.scene.frame_end

    # Collected per-frame camera transforms.
    camera_transforms = []

    # Walk every frame in the scene's range.
    for frame in range(start_frame, end_frame + 1):
        # Jump to the frame and force a dependency-graph update so that
        # matrix_world reflects the animated pose.
        bpy.context.scene.frame_set(frame)
        bpy.context.view_layer.update()
        bpy.context.evaluated_depsgraph_get().update()

        # World-space camera position ...
        loc = camera.matrix_world.to_translation()
        # ... and world-space rotation as XYZ Euler angles.
        rot = camera.matrix_world.to_euler('XYZ')

        # Timestamp in integer ticks since the first frame.
        # NOTE(review): frame_duration * 1e7 yields 100-ns ticks, not the
        # microseconds the original comment claimed — confirm the unit the
        # consumer of this CSV expects.
        timestamp = (frame - start_frame) * frame_duration * 1e7
        scale = 1.0
        # Append position and rotation for this frame.
        camera_transforms.append({
            'timestamp': int(timestamp),  # integer tick count (see NOTE above)
            'x': loc.x * scale,
            'y': loc.y * scale,
            'z': loc.z * scale,
            # Wrap each Euler angle into [0, 2*pi).
            'rx': (rot.x + 2 * pi) % (2 * pi),
            'ry': (rot.y + 2 * pi) % (2 * pi),
            'rz': (rot.z + 2 * pi) % (2 * pi),
        })

    # Export to CSV.
    with open(export_file, 'w', newline='') as csvfile:
        fieldnames = ['timestamp', 'x', 'y', 'z', 'rx', 'ry', 'rz']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

        # Write a '#'-prefixed header line (deliberately not writeheader()).
        csvfile.write('# ' + ', '.join(fieldnames) + '\n')

        # Write the transform rows.
        for transform in camera_transforms:
            writer.writerow(transform)

    print(f"Camera transforms exported to {export_file}")
else:
    print("No camera selected or active object is not a camera.")
|
67 |
+
|
indoor_motor/get_trajectory_in_blender_quan.py
ADDED
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import bpy
import csv
from math import pi

# Output CSV path for the exported camera trajectory (quaternion variant).
export_file = "/home/falcary/workstation/blender_env_indoors_dataset/outputs/AI58_002/output_in_blender_quan.csv"

# Time between consecutive frames, in seconds.
# NOTE(review): the original comment claimed 24 fps, but this value is 10 fps
# — confirm which playback rate the downstream consumer expects.
frame_duration = 1 / 10

# Active camera of the current scene.
camera = bpy.context.scene.camera
# Force a 1:1 unit scale so exported positions are in raw scene units.
bpy.context.scene.unit_settings.scale_length = 1.00

# Proceed only if there is an active camera object of type 'CAMERA'.
if camera is not None and camera.type == 'CAMERA':
    # Frame range of the current scene.
    start_frame = bpy.context.scene.frame_start
    end_frame = bpy.context.scene.frame_end

    # Collected per-frame camera transforms.
    camera_transforms = []

    # Walk every frame in the scene's range.
    for frame in range(start_frame, end_frame + 1):
        # Jump to the frame and force a dependency-graph update so that
        # matrix_world reflects the animated pose.
        bpy.context.scene.frame_set(frame)
        bpy.context.view_layer.update()
        bpy.context.evaluated_depsgraph_get().update()

        # World-space camera position ...
        loc = camera.matrix_world.to_translation()
        # ... and world-space rotation as a quaternion.
        quat = camera.matrix_world.to_quaternion()

        # Timestamp in integer ticks since the first frame.
        # NOTE(review): frame_duration * 1e7 yields 100-ns ticks, not the
        # microseconds the original comment claimed — confirm the unit the
        # consumer of this CSV expects.
        timestamp = (frame - start_frame) * frame_duration * 1e7
        scale = bpy.context.scene.unit_settings.scale_length
        # Append position and rotation for this frame.
        camera_transforms.append({
            'timestamp': int(timestamp),  # integer tick count (see NOTE above)
            'x': loc.x * scale,
            'y': loc.y * scale,
            'z': loc.z * scale,
            # Quaternion components exported in x, y, z, w order.
            'qx': quat.x,
            'qy': quat.y,
            'qz': quat.z,
            'qw': quat.w,
        })

    # Export to CSV.
    with open(export_file, 'w', newline='') as csvfile:
        fieldnames = ['timestamp', 'x', 'y', 'z', 'qx', 'qy', 'qz', 'qw']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

        # Write a '#'-prefixed header line (deliberately not writeheader()).
        csvfile.write('# ' + ', '.join(fieldnames) + '\n')

        # Write the transform rows.
        for transform in camera_transforms:
            writer.writerow(transform)

    print(f"Camera transforms exported to {export_file}")
else:
    print("No camera selected or active object is not a camera.")
|
68 |
+
|
indoor_motor/output_in_blender_quan.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
indoor_motor/views/test/r_1.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_100.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_101.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_103.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_104.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_105.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_106.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_108.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_109.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_111.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_112.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_113.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_114.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_116.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_117.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_118.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_120.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_121.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_124.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_125.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_127.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_128.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_131.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_132.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_134.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_142.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_143.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_144.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_146.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_148.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_152.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_157.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_158.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_159.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_160.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_162.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_163.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_165.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_166.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_168.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_169.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_174.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_175.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_177.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_178.png
ADDED
![]() |
Git LFS Details
|
indoor_motor/views/test/r_179.png
ADDED
![]() |
Git LFS Details
|