JeffreyXiang committed bd46f72 (parent: 3162a90): "Add more setting"

app.py CHANGED
@@ -15,6 +15,9 @@ from trellis.representations import Gaussian, MeshExtractResult
 from trellis.utils import render_utils, postprocessing_utils
 
 
+MAX_SEED = np.iinfo(np.int32).max
+
+
 def preprocess_image(image: Image.Image) -> Image.Image:
     """
     Preprocess the input image.
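The new module-level constant caps seeds at the largest signed 32-bit integer, which keeps any drawn value valid for both the Gradio seed slider and torch.manual_seed. A minimal sketch of the same seed-handling pattern in isolation; resolve_seed is a hypothetical helper name, not part of the commit:

    import numpy as np
    import torch

    MAX_SEED = np.iinfo(np.int32).max  # 2147483647, the largest int32 value

    def resolve_seed(seed: int, randomize: bool) -> int:
        """Return either the user-supplied seed or a fresh random one."""
        if randomize:
            # np.random.randint's upper bound is exclusive, so the result
            # always fits in a signed 32-bit integer.
            seed = int(np.random.randint(0, MAX_SEED))
        torch.manual_seed(seed)  # seed PyTorch's global RNG for reproducibility
        return seed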
@@ -70,18 +73,39 @@ def unpack_state(state: dict) -> Tuple[Gaussian, edict, str]:
 
 
 @spaces.GPU
-def image_to_3d(image: Image.Image) -> Tuple[dict, str]:
+def image_to_3d(image: Image.Image, seed: int, randomize_seed: bool, ss_guidance_strength: float, ss_sampling_steps: int, slat_guidance_strength: float, slat_sampling_steps: int) -> Tuple[dict, str]:
     """
     Convert an image to a 3D model.
 
     Args:
         image (Image.Image): The input image.
+        seed (int): The random seed.
+        randomize_seed (bool): Whether to randomize the seed.
+        ss_guidance_strength (float): The guidance strength for sparse structure generation.
+        ss_sampling_steps (int): The number of sampling steps for sparse structure generation.
+        slat_guidance_strength (float): The guidance strength for structured latent generation.
+        slat_sampling_steps (int): The number of sampling steps for structured latent generation.
 
     Returns:
         dict: The information of the generated 3D model.
         str: The path to the video of the 3D model.
     """
-    outputs = pipeline(image, formats=["gaussian", "mesh"], preprocess_image=False)
+    if randomize_seed:
+        seed = np.random.randint(0, MAX_SEED)
+    torch.manual_seed(seed)
+    outputs = pipeline(
+        image,
+        formats=["gaussian", "mesh"],
+        preprocess_image=False,
+        sparse_structure_sampler_params={
+            "steps": ss_sampling_steps,
+            "cfg_strength": ss_guidance_strength,
+        },
+        slat_sampler_params={
+            "steps": slat_sampling_steps,
+            "cfg_strength": slat_guidance_strength,
+        },
+    )
     video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']
     video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']
     video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
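The two render passes at the end of this hunk are stitched frame by frame: axis=1 concatenation places the color render and the normal render side by side within each output frame. A tiny self-contained illustration, with dummy frames standing in for the real renders:

    import numpy as np

    # Two dummy 4-frame videos of 8x8 RGB images standing in for the
    # color pass and the normal pass.
    video = [np.zeros((8, 8, 3), dtype=np.uint8) for _ in range(4)]
    video_geo = [np.full((8, 8, 3), 255, dtype=np.uint8) for _ in range(4)]

    # axis=1 concatenates along width, so each combined frame is 8x16:
    # the color frame on the left, the normal frame on the right.
    combined = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
    assert combined[0].shape == (8, 16, 3)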
@@ -131,11 +155,25 @@ with gr.Blocks() as demo:
     with gr.Row():
         with gr.Column():
             image_prompt = gr.Image(label="Image Prompt", image_mode="RGBA", type="pil", height=300)
-            generate_btn = gr.Button("Generate")
+
+            with gr.Accordion(label="Generation Settings", open=False):
+                seed = gr.Slider(0, MAX_SEED, label="Seed", value=0, step=1)
+                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
+                gr.Markdown("Stage 1: Sparse Structure Generation")
+                with gr.Row():
+                    ss_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=5.0, step=0.1)
+                    ss_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=25, step=1)
+                gr.Markdown("Stage 2: Structured Latent Generation")
+                with gr.Row():
+                    slat_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=5.0, step=0.1)
+                    slat_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=25, step=1)
 
-            gr.Markdown("GLB Extraction Settings")
-            mesh_simplify = gr.Slider(0.9, 0.98, label="Simplify", value=0.95, step=0.01)
-            texture_size = gr.Slider(512, 2048, label="Texture Size", value=1024, step=512)
+            generate_btn = gr.Button("Generate")
+
+            with gr.Accordion(label="GLB Extraction Settings", open=False):
+                mesh_simplify = gr.Slider(0.9, 0.98, label="Simplify", value=0.95, step=0.01)
+                texture_size = gr.Slider(512, 2048, label="Texture Size", value=1024, step=512)
+
             extract_glb_btn = gr.Button("Extract GLB", interactive=False)
 
         with gr.Column():
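gr.Accordion is a plain layout container: components created inside its with-block are grouped under a collapsible header, and open=False keeps the advanced settings folded away by default. A minimal standalone sketch of the pattern, with placeholder ranges rather than the app's actual layout:

    import gradio as gr

    with gr.Blocks() as demo:
        with gr.Accordion(label="Generation Settings", open=False):
            # Components defined here stay hidden until the user expands
            # the accordion, keeping the default UI uncluttered.
            seed = gr.Slider(0, 100, label="Seed", value=0, step=1)
            randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)

    # demo.launch()  # uncomment to serve the demo locally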
@@ -168,7 +206,7 @@ with gr.Blocks() as demo:
 
     generate_btn.click(
         image_to_3d,
-        inputs=[image_prompt],
+        inputs=[image_prompt, seed, randomize_seed, ss_guidance_strength, ss_sampling_steps, slat_guidance_strength, slat_sampling_steps],
         outputs=[model, video_output],
     ).then(
         activate_button,
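Gradio passes the components in the inputs list to the callback positionally, so the list order here must match image_to_3d's parameter order exactly. A minimal sketch of the same wiring with a trivial callback; echo_settings and its components are hypothetical:

    import gradio as gr

    def echo_settings(seed: int, randomize: bool) -> str:
        # Values arrive positionally, in the same order as the inputs list below.
        return f"seed={seed}, randomize={randomize}"

    with gr.Blocks() as demo:
        seed = gr.Slider(0, 100, label="Seed", value=0, step=1)
        randomize = gr.Checkbox(label="Randomize Seed", value=True)
        btn = gr.Button("Show")
        out = gr.Textbox(label="Result")
        # inputs[0] -> seed parameter, inputs[1] -> randomize parameter
        btn.click(echo_settings, inputs=[seed, randomize], outputs=[out])

    # demo.launch()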