abreza
committed on
Commit
•
bdf752e
0
Parent(s):
Initial commit
Browse files- .gitattributes +35 -0
- README.md +12 -0
- app.py +67 -0
- data/__init__.py +0 -0
- data/loader.py +59 -0
- requirements.txt +4 -0
- utils/__init__.py +0 -0
- utils/geometry.py +11 -0
- visualization/__init__.py +0 -0
- visualization/logger.py +187 -0
- visualization/visualizer.py +75 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: LensCraft
|
3 |
+
emoji: 👀
|
4 |
+
colorFrom: red
|
5 |
+
colorTo: pink
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 4.44.1
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
---
|
11 |
+
|
12 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from gradio_rerun import Rerun
|
3 |
+
from data.loader import load_simulation_data
|
4 |
+
from visualization.visualizer import visualize_simulation
|
5 |
+
|
6 |
+
|
7 |
+
def update_simulation_dropdown(file):
    """Rebuild the simulation dropdown after a new JSON file is uploaded.

    Returns a gr.Dropdown update whose choices are the human-readable
    simulation descriptions (empty when loading failed) with no selection.
    """
    _, descriptions = load_simulation_data(file)
    choices = descriptions or []
    return gr.Dropdown(choices=choices, value=None, allow_custom_value=False)
|
14 |
+
|
15 |
+
|
16 |
+
def create_app():
    """Assemble the Gradio Blocks UI for the camera-simulation visualizer.

    Layout: a file upload + simulation dropdown row, a frame-selection
    textbox, and a Rerun viewer. Re-visualization is triggered by either
    the dropdown or the frame textbox changing.
    """
    with gr.Blocks() as demo:
        gr.Markdown("""
        # Camera Simulation Visualizer
        Upload a JSON file containing camera simulation data and select a simulation to visualize.
        """)

        with gr.Row():
            file_input = gr.File(
                label="Upload Simulation JSON",
                file_types=[".json"],
            )
            # type="index" makes the dropdown emit the selected position,
            # which visualize_simulation uses to index the simulations list.
            simulation_dropdown = gr.Dropdown(
                label="Select Simulation",
                choices=[],
                type="index",
                scale=2,
            )

        frame_input = gr.Textbox(
            label="Frame Selection",
            placeholder="E.g. 1-30, 35, 40-50 (leave empty for all frames)",
        )

        with gr.Row():
            viewer = Rerun(streaming=False)

        file_input.change(
            update_simulation_dropdown,
            inputs=[file_input],
            outputs=[simulation_dropdown],
        )

        # Both controls re-run the same visualization with the same inputs.
        for trigger in (simulation_dropdown, frame_input):
            trigger.change(
                visualize_simulation,
                inputs=[file_input, simulation_dropdown, frame_input],
                outputs=[viewer],
            )

    return demo
|
63 |
+
|
64 |
+
|
65 |
+
if __name__ == "__main__":
    # Build the UI and serve it locally (queueing enabled, no public link).
    create_app().queue().launch(share=False)
|
data/__init__.py
ADDED
File without changes
|
data/loader.py
ADDED
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
from typing import Optional, Dict, Any, List, Tuple
|
3 |
+
|
4 |
+
|
5 |
+
def create_instruction_description(instruction: Dict[str, Any]) -> str:
    """Build a one-line human-readable summary of a camera instruction.

    Combines movement/shot type, the targeted subject index, and the
    duration in frames. Returns a fixed fallback string when the
    instruction carries none of those fields.
    """
    movement = instruction.get('cameraMovement')
    shot = instruction.get('initialShotType')
    duration = instruction.get('frameCount')
    target = instruction.get('subjectIndex')

    # Lead with the movement/shot phrase; either field alone still
    # produces a sensible prefix.
    if movement and shot:
        lead = f"{movement} {shot}"
    elif movement:
        lead = f"{movement} shot"
    elif shot:
        lead = f"Static {shot}"
    else:
        lead = None

    pieces = [] if lead is None else [lead]
    if target is not None:
        pieces.append(f"of subject {target}")
    if duration is not None:
        pieces.append(f"lasting {duration} frames")

    if not pieces:
        return "No camera instruction details available"
    return " ".join(pieces)
|
30 |
+
|
31 |
+
|
32 |
+
def load_simulation_data(file) -> Tuple[Optional[List[Dict[str, Any]]], Optional[List[str]]]:
    """Load simulations from an uploaded JSON file and build dropdown labels.

    Parameters
    ----------
    file:
        Uploaded file object exposing a ``.name`` path (e.g. a Gradio file
        upload), or None when nothing has been uploaded yet.

    Returns
    -------
    ``(simulations, descriptions)`` on success, where ``descriptions[i]`` is a
    multi-line summary of ``simulations[i]``; ``(None, None)`` when no file is
    given or loading/parsing fails (the error is printed, keeping the UI
    best-effort rather than crashing).
    """
    if file is None:
        return None, None

    try:
        # Context manager fixes the file-handle leak of json.load(open(...));
        # explicit encoding makes parsing independent of the platform default.
        with open(file.name, encoding="utf-8") as fp:
            json_data = json.load(fp)
        simulations = json_data['simulations']

        descriptions = []
        for i, sim in enumerate(simulations):
            header = f"Simulation {i + 1}"

            # One numbered line per instruction in this simulation.
            instruction_texts = []
            for j, instruction in enumerate(sim['instructions']):
                inst_desc = create_instruction_description(instruction)
                instruction_texts.append(f" {j + 1}. {inst_desc}")

            full_description = f"{header}\n" + "\n".join(instruction_texts)

            subject_count = len(sim['subjects'])
            full_description = f"{full_description}\n ({subject_count} subjects)"

            descriptions.append(full_description)

        return simulations, descriptions
    except Exception as e:
        # UI boundary: report and degrade gracefully instead of raising.
        print(f"Error loading simulation data: {str(e)}")
        return None, None
|
requirements.txt
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
gradio_rerun
|
2 |
+
scipy==1.14.1
|
3 |
+
evo==1.30.3
|
4 |
+
gradio==4.40.0
|
utils/__init__.py
ADDED
File without changes
|
utils/geometry.py
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from scipy.spatial.transform import Rotation
|
3 |
+
from typing import Dict
|
4 |
+
|
5 |
+
|
6 |
+
def vector3_to_numpy(vec: Dict[str, float]) -> np.ndarray:
    """Convert an {'x', 'y', 'z'} mapping into a 3-element numpy array."""
    return np.array([vec[axis] for axis in ('x', 'y', 'z')])
|
8 |
+
|
9 |
+
|
10 |
+
def euler_to_quaternion(euler: Dict[str, float]) -> np.ndarray:
    """Convert Euler angles to a quaternion in scipy's [x, y, z, w] order.

    Angles are taken from the 'x', 'y', 'z' keys and interpreted in scipy's
    lowercase-'xyz' (extrinsic) rotation order, in radians.
    """
    angles = [euler['x'], euler['y'], euler['z']]
    return Rotation.from_euler('xyz', angles).as_quat()
|
visualization/__init__.py
ADDED
File without changes
|
visualization/logger.py
ADDED
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import rerun as rr
|
2 |
+
import numpy as np
|
3 |
+
from typing import Dict, Any, List
|
4 |
+
from utils.geometry import vector3_to_numpy, euler_to_quaternion
|
5 |
+
|
6 |
+
|
7 |
+
def create_subject_box(subject: Dict) -> Dict[str, np.ndarray]:
    """Derive 3D-box parameters (center and half-extents) for a subject.

    Reads the subject's 'position' and 'size' vectors; the box is centered
    on the position with half the size in each dimension.
    """
    half_extent = vector3_to_numpy(subject['size']) / 2
    return {
        'center': vector3_to_numpy(subject['position']),
        'half_size': half_extent,
    }
|
15 |
+
|
16 |
+
|
17 |
+
class SimulationLogger:
    """Streams camera-simulation data (subjects, trajectory, poses) to Rerun.

    Static scene content (subjects, trajectory, helper keyframes, metadata)
    is logged with ``timeless=True``; per-frame camera poses are logged
    against the ``frame_idx`` time sequence.
    """

    def __init__(self) -> None:
        # Start a Rerun recording and pin the world coordinate convention.
        rr.init("camera_simulation")
        rr.log("world", rr.ViewCoordinates.RIGHT_HAND_Y_UP, timeless=True)

        # Pinhole intrinsics: focal length 500 px with the principal point at
        # the center of the 1920x1080 image used by the Pinhole logs below.
        self.K = np.array([
            [500, 0, 960],
            [0, 500, 540],
            [0, 0, 1]
        ])

    def log_metadata(self, instructions: List[Dict[str, Any]]) -> None:
        """Log the full instruction list as one static text document."""
        if not instructions:
            return

        # Missing fields are rendered as 'N/A' rather than being omitted.
        rr.log("metadata/instructions", rr.TextDocument(
            "\n".join([
                f"Instruction {i+1}:\n" +
                f" Movement: {inst.get('cameraMovement', 'N/A')}\n" +
                f" Easing: {inst.get('movementEasing', 'N/A')}\n" +
                f" Frames: {inst.get('frameCount', 'N/A')}\n" +
                f" Camera Angle: {inst.get('initialCameraAngle', 'N/A')}\n" +
                f" Shot Type: {inst.get('initialShotType', 'N/A')}\n" +
                f" Subject Index: {inst.get('subjectIndex', 'N/A')}"
                for i, inst in enumerate(instructions)
            ])
        ), timeless=True)

    def log_subjects(self, subjects: List[Dict[str, Any]]) -> None:
        """Log each subject as a solid red 3D box; unparseable subjects are
        skipped (with a printed error) rather than aborting the batch."""
        if not subjects:
            return

        centers = []
        half_sizes = []
        colors = []
        labels = []

        for subject in subjects:
            try:
                box_params = create_subject_box(subject)
                centers.append(box_params['center'])
                half_sizes.append(box_params['half_size'])
                colors.append([0.8, 0.2, 0.2, 1.0])
                labels.append(subject.get('objectClass', 'Unknown'))
            except Exception as e:
                print(f"Error creating box parameters: {str(e)}")
                continue

        # NOTE(review): `labels` is collected but never passed to rr.Boxes3D
        # (and show_labels=False), so it is currently dead data.
        if centers:
            rr.log(
                "world/subjects",
                rr.Boxes3D(
                    centers=np.array(centers),
                    half_sizes=np.array(half_sizes),
                    colors=np.array(colors),
                    show_labels=False,
                    fill_mode="solid"
                ),
                timeless=True
            )

    def log_camera_trajectory(self, camera_frames: List[Dict[str, Any]]) -> None:
        """Log the camera path: one cyan point per frame position, plus
        connecting line segments when there are at least two frames."""
        if not camera_frames:
            return

        try:
            camera_positions = np.array([
                vector3_to_numpy(frame['position']) for frame in camera_frames
            ])
            rr.log(
                "world/camera_trajectory",
                rr.Points3D(
                    camera_positions,
                    colors=np.full((len(camera_positions), 4),
                                   [0.0, 0.8, 0.8, 1.0])
                ),
                timeless=True
            )

            if len(camera_positions) > 1:
                # Pair consecutive positions into (start, end) segments.
                lines = np.stack(
                    [camera_positions[:-1], camera_positions[1:]], axis=1)
                rr.log(
                    "world/camera_trajectory/line",
                    rr.LineStrips3D(
                        lines,
                        colors=[(0.0, 0.8, 0.8, 1.0)]
                    ),
                    timeless=True
                )

        except Exception as e:
            print(f"Error logging camera trajectory: {str(e)}")

    def log_camera_frames(self, camera_frames: List[Dict[str, Any]]) -> None:
        """Log per-frame camera pose and pinhole intrinsics on the
        ``frame_idx`` timeline; a failing frame is reported and skipped."""
        if not camera_frames:
            return

        for frame_idx, camera_frame in enumerate(camera_frames):
            try:
                rr.set_time_sequence("frame_idx", frame_idx)

                position = vector3_to_numpy(camera_frame['position'])
                # 'angle' holds Euler angles; converted to an xyzw quaternion.
                rotation_q = euler_to_quaternion(camera_frame['angle'])

                rr.log(
                    "world/camera",
                    rr.Transform3D(
                        translation=position,
                        rotation=rr.Quaternion(xyzw=rotation_q)
                    )
                )

                rr.log(
                    "world/camera/image",
                    rr.Pinhole(
                        image_from_camera=self.K,
                        width=1920,
                        height=1080
                    )
                )

            except Exception as e:
                print(f"Error logging camera frame {frame_idx}: {str(e)}")

    def log_helper_keyframes(self, helper_keyframes: List[Dict[str, Any]]) -> None:
        """Log helper keyframes statically: yellow points for all positions,
        plus a half-scale camera frustum per keyframe."""
        if not helper_keyframes:
            return


        helper_positions = np.array([
            vector3_to_numpy(frame['position']) for frame in helper_keyframes
        ])
        rr.log(
            "world/helper_keyframes",
            rr.Points3D(
                helper_positions,
                radii=np.full(len(helper_positions), 0.03),
                colors=np.full((len(helper_positions), 4), [1.0, 1.0, 0.0, 1.0]),
            ),
            timeless=True
        )

        for keyframe_idx, helper_keyframe in enumerate(helper_keyframes):
            try:
                position = vector3_to_numpy(helper_keyframe['position'])
                rotation_q = euler_to_quaternion(helper_keyframe['angle'])

                # Each helper camera gets its own entity path so all of them
                # stay visible simultaneously.
                rr.log(
                    f"world/helper_camera_{keyframe_idx}",
                    rr.Transform3D(
                        translation=position,
                        rotation=rr.Quaternion(xyzw=rotation_q),
                        scale=(.5, .5, .5)
                    ),
                    timeless=True
                )

                rr.log(
                    f"world/helper_camera_{keyframe_idx}/image",
                    rr.Pinhole(
                        image_from_camera=self.K,
                        width=1920,
                        height=1080,
                    ),
                    timeless=True
                )

            except Exception as e:
                print(
                    f"Error logging helper keyframe {keyframe_idx}: {str(e)}")
|
visualization/visualizer.py
ADDED
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import tempfile
|
2 |
+
import os
|
3 |
+
import spaces
|
4 |
+
from typing import Optional, List
|
5 |
+
from data.loader import load_simulation_data
|
6 |
+
from visualization.logger import SimulationLogger
|
7 |
+
import rerun as rr
|
8 |
+
|
9 |
+
|
10 |
+
def parse_frame_selection(selection_str: str, max_frame: int) -> List[int]:
    """Parse a 1-based selection string like ``"1-30, 35, 40-50"`` into a
    sorted list of 0-based frame indices within ``[0, max_frame)``.

    An empty/falsy ``selection_str`` selects every frame. Segments that are
    not valid integers or ranges (e.g. ``"abc"``, stray commas) are skipped
    instead of raising, so one typo no longer discards the whole selection
    (previously the ValueError propagated and the caller silently returned
    None). Out-of-range indices are dropped.
    """
    if not selection_str:
        return list(range(max_frame))

    frames = set()
    for seg in selection_str.split(','):
        seg = seg.strip()
        if not seg:
            # Tolerate stray commas such as "1,,5".
            continue
        try:
            if '-' in seg:
                # Inclusive 1-based range, e.g. "40-50".
                start_str, end_str = seg.split('-', 1)
                start, end = int(start_str), int(end_str)
                # Convert to 0-based indexing and clamp to valid frames.
                frames.update(
                    f for f in range(start - 1, end) if 0 <= f < max_frame
                )
            else:
                # Single 1-based frame number.
                f = int(seg) - 1
                if 0 <= f < max_frame:
                    frames.add(f)
        except ValueError:
            # Non-numeric segment — ignore it and keep the rest.
            continue

    return sorted(frames)
|
33 |
+
|
34 |
+
|
35 |
+
@spaces.GPU
def visualize_simulation(file, simulation_index: Optional[int], frame_selection: str) -> Optional[str]:
    """Render the selected simulation to a Rerun .rrd file and return its path.

    Returns None when inputs are missing, the index is invalid, or any step
    fails; errors are printed so the UI stays responsive.
    """
    if file is None or simulation_index is None:
        return None

    try:
        simulations, _ = load_simulation_data(file)

        index_ok = (
            bool(simulations)
            and isinstance(simulation_index, int)
            and 0 <= simulation_index < len(simulations)
        )
        if not index_ok:
            print(f"Invalid simulation data or index: {simulation_index}")
            return None

        sim = simulations[simulation_index]

        all_frames = sim.get('cameraFrames', [])
        chosen = parse_frame_selection(frame_selection, len(all_frames))

        # Fresh temp directory per call; the .rrd must outlive this function
        # so the viewer can stream it.
        rrd_path = os.path.join(tempfile.mkdtemp(), "simulation.rrd")

        sim_logger = SimulationLogger()
        sim_logger.log_metadata(sim['instructions'])
        sim_logger.log_subjects(sim['subjects'])

        if 'helper_keyframes' in sim:
            sim_logger.log_helper_keyframes(sim['helper_keyframes'])

        # Restrict trajectory and per-frame pose logging to the chosen frames.
        frames_subset = [all_frames[i] for i in chosen]
        sim_logger.log_camera_trajectory(frames_subset)
        sim_logger.log_camera_frames(frames_subset)

        rr.save(rrd_path)
        return rrd_path

    except Exception as e:
        print(f"Error processing simulation: {str(e)}")
        return None
|