renyuxi committed on
Commit
8321e61
1 Parent(s): 3aece2b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -17
app.py CHANGED
@@ -3,6 +3,7 @@ import argparse
3
  import os
4
  import time
5
  from os import path
 
6
 
7
  cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
8
  os.environ["TRANSFORMERS_CACHE"] = cache_path
@@ -42,41 +43,48 @@ with gr.Blocks() as demo:
42
  with gr.Column():
43
  with gr.Row():
44
  with gr.Column():
 
 
 
45
  num_images = gr.Slider(label="Number of Images", minimum=1, maximum=8, step=1, value=4, interactive=True)
46
  steps = gr.Slider(label="Inference Steps", minimum=1, maximum=8, step=1, value=1, interactive=True)
 
47
  eta = gr.Number(label="Eta (Corresponds to parameter eta (η) in the DDIM paper, i.e. 0.0 equals DDIM, 1.0 equals LCM)", value=1., interactive=True)
48
  controlnet_scale = gr.Number(label="ControlNet Conditioning Scale", value=1.0, interactive=True)
49
- prompt = gr.Text(label="Prompt", value="a photo of a cat", interactive=True)
50
  seed = gr.Number(label="Seed", value=3413, interactive=True)
51
- scribble = gr.ImageEditor(height=768, width=768, type="pil")
52
  btn = gr.Button(value="run")
 
53
  with gr.Column():
54
- output = gr.Gallery(height=768)
 
55
 
56
- @spaces.GPU
57
- def process_image(steps, prompt, controlnet_scale, eta, seed, scribble, num_images):
58
- global pipe
 
59
  with torch.inference_mode(), torch.autocast("cuda", dtype=torch.float16), timer("inference"):
60
- return pipe(
61
  prompt=[prompt]*num_images,
62
- image=[scribble['composite'].resize((512, 512))]*num_images,
63
  generator=torch.Generator().manual_seed(int(seed)),
64
  num_inference_steps=steps,
65
  guidance_scale=0.,
66
  eta=eta,
67
- controlnet_conditioning_scale=float(controlnet_scale)
68
  ).images
 
 
 
 
 
69
 
70
- reactive_controls = [steps, prompt, controlnet_scale, eta, seed, scribble, num_images]
71
 
72
- for control in reactive_controls:
73
- control.change(fn=process_image, inputs=reactive_controls, outputs=[output])
 
74
 
75
- btn.click(process_image, inputs=reactive_controls, outputs=[output])
76
 
77
  if __name__ == "__main__":
78
- # parser = argparse.ArgumentParser()
79
- # parser.add_argument("--port", default=7891, type=int)
80
- # args = parser.parse_args()
81
- # demo.launch(server_name="0.0.0.0", server_port=args.port)
82
  demo.launch()
 
3
  import os
4
  import time
5
  from os import path
6
+ from PIL import ImageOps
7
 
8
  cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
9
  os.environ["TRANSFORMERS_CACHE"] = cache_path
 
43
  with gr.Column():
44
  with gr.Row():
45
  with gr.Column():
46
+ # scribble = gr.Image(source="canvas", tool="color-sketch", shape=(512, 512), height=768, width=768, type="pil")
47
+ scribble = gr.ImageEditor(type="pil", image_mode="L", crop_size=(512, 512), sources=(), brush=gr.Brush(color_mode="fixed", colors=["#FFFFFF"]))
48
+ # scribble_out = gr.Image(height=384, width=384)
49
  num_images = gr.Slider(label="Number of Images", minimum=1, maximum=8, step=1, value=4, interactive=True)
50
  steps = gr.Slider(label="Inference Steps", minimum=1, maximum=8, step=1, value=1, interactive=True)
51
+ prompt = gr.Text(label="Prompt", value="a photo of a cat", interactive=True)
52
  eta = gr.Number(label="Eta (Corresponds to parameter eta (η) in the DDIM paper, i.e. 0.0 equals DDIM, 1.0 equals LCM)", value=1., interactive=True)
53
  controlnet_scale = gr.Number(label="ControlNet Conditioning Scale", value=1.0, interactive=True)
 
54
  seed = gr.Number(label="Seed", value=3413, interactive=True)
 
55
  btn = gr.Button(value="run")
56
+
57
  with gr.Column():
58
+ output = gr.Gallery(height=768, format="png")
59
+ # output = gr.Image()
60
 
61
+ @spaces.GPU
62
+ def process_image(steps, prompt, controlnet_scale, eta, seed, scribble, num_images):
63
+ global pipe
64
+ if scribble:
65
  with torch.inference_mode(), torch.autocast("cuda", dtype=torch.float16), timer("inference"):
66
+ result = pipe(
67
  prompt=[prompt]*num_images,
68
+ image=[ImageOps.invert(scribble['composite'])]*num_images,
69
  generator=torch.Generator().manual_seed(int(seed)),
70
  num_inference_steps=steps,
71
  guidance_scale=0.,
72
  eta=eta,
73
+ controlnet_conditioning_scale=float(controlnet_scale),
74
  ).images
75
+ # result[0].save("test.jpg")
76
+ # print(result[0])
77
+ return result
78
+ else:
79
+ return None
80
 
81
+ reactive_controls = [steps, prompt, controlnet_scale, eta, seed, scribble, num_images]
82
 
83
+ for control in reactive_controls:
84
+ if reactive_controls[-2] is not None:
85
+ control.change(fn=process_image, inputs=reactive_controls, outputs=[output, ])
86
 
87
+ btn.click(process_image, inputs=reactive_controls, outputs=[output, ])
88
 
89
  if __name__ == "__main__":
 
 
 
 
90
  demo.launch()