Update app.py
app.py CHANGED
@@ -41,24 +41,24 @@ with gr.Blocks() as demo:
     with gr.Column():
         with gr.Row():
             with gr.Column():
-                num_images = gr.Slider(label="Number of Images", minimum=1, maximum=2, step=1, value=1, interactive=True)
+                # num_images = gr.Slider(label="Number of Images", minimum=1, maximum=2, step=1, value=1, interactive=True)
                 height = gr.Number(label="Image Height", value=1024, interactive=True)
                 width = gr.Number(label="Image Width", value=1024, interactive=True)
                 steps = gr.Slider(label="Inference Steps", minimum=6, maximum=25, step=1, value=8, interactive=True)
-                scales = gr.
+                scales = gr.Slider(label="Guidance Scale", minimum=0.0, maximum=5.0, value=3.5, interactive=True)
                 # eta = gr.Number(label="Eta (Corresponds to parameter eta (η) in the DDIM paper, i.e. 0.0 eqauls DDIM, 1.0 equals LCM)", value=1., interactive=True)
                 prompt = gr.Text(label="Prompt", value="a photo of a cat", interactive=True)
                 seed = gr.Number(label="Seed", value=3413, interactive=True)
                 btn = gr.Button(value="run")
             with gr.Column():
-                output = gr.Gallery(height=
+                output = gr.Gallery(height=768)

     @spaces.GPU
-    def process_image(
+    def process_image(height, width, steps, scales, prompt, seed):
         global pipe
         with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("inference"):
             return pipe(
-                prompt=[prompt]
+                prompt=[prompt],
                 generator=torch.Generator().manual_seed(int(seed)),
                 num_inference_steps=steps,
                 guidance_scale=scales,
@@ -66,7 +66,7 @@ with gr.Blocks() as demo:
                 width=int(width)
             ).images

-    reactive_controls = [
+    reactive_controls = [height, width, steps, scales, prompt, seed]

     # for control in reactive_controls:
     # control.change(fn=process_image, inputs=reactive_controls, outputs=[output])
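
Note that in both the old and new versions the control.change wiring stays commented out, so nothing in these hunks actually connects reactive_controls or the updated process_image signature to the output gallery. Below is a minimal, self-contained sketch of how that wiring could look; the btn.click call and the placeholder image generator are assumptions for illustration and stand in for the real pipe(...) call elsewhere in app.py.

# Hypothetical wiring sketch -- not part of the commit above. The body of
# process_image here is a placeholder for the real pipe(...) call in app.py.
import gradio as gr
from PIL import Image

def process_image(height, width, steps, scales, prompt, seed):
    # Placeholder: return a blank canvas instead of running the diffusion pipeline.
    return [Image.new("RGB", (int(width), int(height)), "gray")]

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            height = gr.Number(label="Image Height", value=1024, interactive=True)
            width = gr.Number(label="Image Width", value=1024, interactive=True)
            steps = gr.Slider(label="Inference Steps", minimum=6, maximum=25, step=1, value=8, interactive=True)
            scales = gr.Slider(label="Guidance Scale", minimum=0.0, maximum=5.0, value=3.5, interactive=True)
            prompt = gr.Text(label="Prompt", value="a photo of a cat", interactive=True)
            seed = gr.Number(label="Seed", value=3413, interactive=True)
            btn = gr.Button(value="run")
        with gr.Column():
            output = gr.Gallery(height=768)

    reactive_controls = [height, width, steps, scales, prompt, seed]

    # Explicit button wiring (assumed); the commented-out loop in the diff would
    # instead re-run process_image whenever any single control changes.
    btn.click(fn=process_image, inputs=reactive_controls, outputs=[output])

if __name__ == "__main__":
    demo.launch()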