takuma104 committed
Commit
f860c91
•
1 Parent(s): 6182913
Files changed (6)
  1. .gitattributes +1 -0
  2. README.md +3 -3
  3. app.py +66 -0
  4. requirements.txt +7 -0
  5. sample_canny_hand.png +3 -0
  6. sample_pose_body.png +3 -0
.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
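The added rule stores PNG files via Git LFS, which covers the two sample images introduced in this commit (they appear below as LFS pointers rather than raw blobs).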
README.md CHANGED
@@ -1,8 +1,8 @@
  ---
  title: Multi Controlnet
- emoji: 💻
- colorFrom: blue
- colorTo: pink
+ emoji: 😻
+ colorFrom: green
+ colorTo: gray
  sdk: gradio
  sdk_version: 3.20.1
  app_file: app.py
app.py ADDED
@@ -0,0 +1,66 @@
+ from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
+ from diffusers import UniPCMultistepScheduler
+ import gradio as gr
+ import torch
+
+ # Constants (Canny edge thresholds)
+ low_threshold = 100
+ high_threshold = 200
+
+ # Models
+ controlnet_pose = ControlNetModel.from_pretrained(
+     "lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16
+ )
+ controlnet_canny = ControlNetModel.from_pretrained(
+     "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
+ )
+
+ pipe = StableDiffusionControlNetPipeline.from_pretrained(
+     "runwayml/stable-diffusion-v1-5",
+     controlnet=[controlnet_pose, controlnet_canny],
+     safety_checker=None, torch_dtype=torch.float16
+ )
+ pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+
+ # This loads the individual model components onto the GPU on demand, so we
+ # don't need to call pipe.to("cuda") explicitly.
+ pipe.enable_model_cpu_offload()
+
+ # xformers memory-efficient attention
+ pipe.enable_xformers_memory_efficient_attention()
+
+ # Generator seed
+ generator = torch.manual_seed(0)
+
+ def generate_images(pose_image, canny_image, prompt):
+     output = pipe(
+         prompt,
+         [pose_image, canny_image],  # one conditioning image per ControlNet
+         generator=generator,
+         num_images_per_prompt=3,
+         num_inference_steps=20,
+     )
+     all_outputs = []
+     all_outputs.extend([pose_image, canny_image])  # show the inputs alongside the results
+     for image in output.images:
+         all_outputs.append(image)
+     return all_outputs
+
+
+ gr.Interface(
+     generate_images,
+     inputs=[
+         gr.Image(type="pil"),
+         gr.Image(type="pil"),
+         gr.Textbox(
+             label="Enter your prompt",
+             max_lines=1,
+             placeholder="best quality, extremely detailed",
+         ),
+     ],
+     outputs=gr.Gallery().style(grid=[2], height="auto"),
+     title="Generate controlled outputs with Multi-ControlNet and Stable Diffusion using 🤗 Diffusers",
+     description="This Space uses pose lines and a Canny edge image as additional conditioning. Please refer to the \"Examples\" for the kinds of images that are appropriate.",
+     examples=[["sample_pose_body.png", "sample_canny_hand.png", "best quality, extremely detailed"]],
+     allow_flagging=False,
+ ).launch(enable_queue=True)
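A note on preprocessing: the app expects conditioning images that are already pose- and edge-extracted, and the low_threshold/high_threshold constants in app.py are defined but never applied there. As a minimal sketch (not part of the commit) of how a Canny conditioning image like sample_canny_hand.png could be prepared, assuming opencv-python and Pillow are installed (neither is pinned in requirements.txt) and reusing those thresholds:

# Sketch only: build a 3-channel Canny edge conditioning image for ControlNet.
# "photo_of_hand.png" is a hypothetical input file used for illustration.
import cv2
import numpy as np
from PIL import Image

def make_canny_condition(path, low_threshold=100, high_threshold=200):
    image = np.array(Image.open(path).convert("RGB"))
    edges = cv2.Canny(image, low_threshold, high_threshold)  # uint8 single-channel edge map
    edges = np.concatenate([edges[:, :, None]] * 3, axis=2)  # replicate to 3 channels
    return Image.fromarray(edges)

# e.g. make_canny_condition("photo_of_hand.png").save("sample_canny_hand.png")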
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ git+https://github.com/takuma104/diffusers.git@multi-controlnet-ext
+ transformers
+ git+https://github.com/huggingface/accelerate
+ torch==1.13.1
+ numpy
+ #controlnet_aux
+ xformers==0.0.16
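Note on the pins: xformers 0.0.16 wheels are built against torch 1.13.1, which is why the two versions are pinned together, and diffusers is installed from the author's multi-controlnet-ext branch, which carries the list-of-ControlNets support app.py relies on. controlnet_aux, which would provide OpenPose/Canny preprocessors, is left commented out, presumably because the app takes pre-processed images as input.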
sample_canny_hand.png ADDED

Git LFS Details

  • SHA256: af171e4da6b0c5aaae032c4371a8e1267e196121a250e4b15c0a4b120da19a81
  • Pointer size: 130 Bytes
  • Size of remote file: 23.6 kB
sample_pose_body.png ADDED

Git LFS Details

  • SHA256: b4b1cf935d6e8e41e9c85ed7c9290d9e4bc8fc8c5091e5a39f86d7a7aedc6e20
  • Pointer size: 131 Bytes
  • Size of remote file: 378 kB