Jean Yu committed
Commit e7abe49 · Parent(s): caef638

Add denoise_steps to enable optimization via an early-stopped diffusion process
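The change exposes the number of denoising iterations as a parameter so the reverse-diffusion loop can be stopped early, trading some fine detail for latency. A minimal sketch of that trade-off with a generic diffusers text-to-image pipeline (the checkpoint name and the timing printout are illustrative placeholders, not what this Space actually loads):

import time

import torch
from diffusers import DiffusionPipeline

# Illustrative checkpoint only; this Space loads its own pipeline, which
# returns both an RGB and a depth panorama.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
).to("cuda")

prompt = "360 view of a large bedroom"
for steps in (50, 25):
    start = time.perf_counter()
    # num_inference_steps sets how many denoising iterations run; fewer
    # iterations end the diffusion process earlier and cut latency roughly
    # proportionally, at some cost in detail.
    image = pipe(prompt, num_inference_steps=steps).images[0]
    print(f"{steps} steps: {time.perf_counter() - start:.1f}s")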
app.py CHANGED
@@ -44,18 +44,19 @@ def predict(
     prompt: str,
     negative_prompt: str,
     guidance_scale: float = 5.0,
+    denoise_steps: int = 50,
     seed: int = 0,
     randomize_seed: bool = True,
 ):
     generator = torch.Generator() if randomize_seed else torch.manual_seed(seed)
     output = pipe(
-
+        prompt,
         width=1024,
         height=512,
         negative_prompt=negative_prompt,
         guidance_scale=guidance_scale,
         generator=generator,
-        num_inference_steps=
+        num_inference_steps=denoise_steps,
     ) # type: ignore
     rgb_image, depth_image = output.rgb[0], output.depth[0] # type: ignore
     with NamedTemporaryFile(suffix=".png", delete=False, dir="tmp") as rgb_file:
@@ -87,6 +88,9 @@ For better results, specify "360 view of" or "panoramic view of" in the prompt
         guidance_scale = gr.Slider(
             label="Guidance Scale", minimum=0, maximum=10, step=0.1, value=5.0
         )
+        denoise_steps = gr.Slider(
+            label="Denoise Steps", minimum=25, maximum=250, step=25, value=50
+        )
         randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
         seed = gr.Slider(label="Seed", minimum=0,
                          maximum=2**64 - 1, step=1)
@@ -101,16 +105,16 @@ For better results, specify "360 view of" or "panoramic view of" in the prompt
         depth = gr.Image(label="Depth Image", type="filepath")
     gr.Examples(
         examples=[
-            ["360 view of a large bedroom", "", 7.0, 42, False]],
-        inputs=[prompt, negative_prompt, guidance_scale, seed, randomize_seed],
+            ["360 view of a large bedroom", "", 7.0, 50, 42, False]],
+        inputs=[prompt, negative_prompt, guidance_scale, denoise_steps, seed, randomize_seed],
         outputs=[rgb, depth, generated_seed, html],
         fn=predict,
         cache_examples=True)

     new_btn.click(
         fn=predict,
-        inputs=[prompt, negative_prompt, guidance_scale, seed, randomize_seed],
+        inputs=[prompt, negative_prompt, guidance_scale, denoise_steps, seed, randomize_seed],
         outputs=[rgb, depth, generated_seed, html],
     )

-block.launch()
+block.launch()
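As a usage sketch, the example row and the click handler above pass the slider value into the new denoise_steps parameter in signature order; an equivalent direct call for a faster preview might look like this (assuming predict returns the four values wired to outputs, with illustrative variable names):

# Hypothetical direct call mirroring the updated signature in the diff;
# a lower denoise_steps stops the diffusion loop earlier for a quicker result.
rgb_path, depth_path, used_seed, viewer_html = predict(
    prompt="360 view of a large bedroom",
    negative_prompt="",
    guidance_scale=7.0,
    denoise_steps=25,  # slider range is 25-250 in steps of 25; default 50
    seed=42,
    randomize_seed=False,
)

The slider value maps directly onto the pipeline's num_inference_steps argument, which is the early-stopping knob the commit message refers to.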