update app
- app.py +35 -10
- c2_clip.png → c11_clip.png +2 -2
- p2_clip.png → c12_clip.png +2 -2
- c13_clip.png +3 -0
- p11_clip.png +3 -0
- p12_clip.png +3 -0
- p13_clip.png +3 -0
- requirements.txt +1 -1
app.py
CHANGED
@@ -2,6 +2,7 @@ from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
 from diffusers import UniPCMultistepScheduler
 import gradio as gr
 import torch
+from gradio.components import Markdown
 
 # Models
 controlnet_pose = ControlNetModel.from_pretrained(
@@ -12,7 +13,7 @@ controlnet_canny = ControlNetModel.from_pretrained(
 )
 
 pipe = StableDiffusionControlNetPipeline.from_pretrained(
-    "
+    "dreamlike-art/dreamlike-anime-1.0",
     controlnet=[controlnet_pose, controlnet_canny],
     safety_checker=None, torch_dtype=torch.float16
 ).to('cuda')
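For context, the assembled pipeline now looks roughly like this. A minimal sketch, assuming the two ControlNet checkpoints named in the commit's model list; the exact arguments of their `from_pretrained` calls are not visible in the hunk, so the `torch_dtype` there is an assumption:

```python
# Sketch of the multi-ControlNet assembly (illustrative, not the Space's exact
# code). Passing the ControlNets as a list means the pipeline expects a
# matching list of conditioning images at call time.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet_pose = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16  # dtype assumed
)
controlnet_canny = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16  # dtype assumed
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "dreamlike-art/dreamlike-anime-1.0",
    controlnet=[controlnet_pose, controlnet_canny],
    safety_checker=None,
    torch_dtype=torch.float16,
).to("cuda")
```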
@@ -26,13 +27,30 @@ pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
 pipe.enable_xformers_memory_efficient_attention()
 
 # Generator seed
-generator = torch.manual_seed(
+generator = torch.manual_seed(0)
 
+negative_prompt = ("worst quality, low quality, lowres, bad anatomy, bad hands, "
+                   "missing fingers, extra digit, fewer digits")
+
+markdown = """
+## Generate controlled outputs with Multi-ControlNet and Stable Diffusion using 🤗 Diffusers
+
+This Space uses a pose image and a canny-edge image as the additional conditioning. Please refer to the "Examples" for what kind of images are appropriate.
+The following tools can create such images; this example uses tool 1.
+1. [Character bones that look like Openpose for blender Ver4.7 Depth+Canny](https://toyxyz.gumroad.com/l/ciojz)
+2. [open-pose-editor](https://github.com/ZhUyU1997/open-pose-editor)
+
+"""
+# **This Space uses these models**:
+# - ControlNet Model (canny): [lllyasviel/sd-controlnet-canny](https://hf.co/lllyasviel/sd-controlnet-canny)
+# - ControlNet Model (openpose): [lllyasviel/sd-controlnet-openpose](https://hf.co/lllyasviel/sd-controlnet-openpose)
+# - SD Base Model: [dreamlike-art/dreamlike-anime-1.0](https://hf.co/dreamlike-art/dreamlike-anime-1.0)
 
 def generate_images(pose_image, canny_image, prompt):
     output = pipe(
-        prompt,
-
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        image=[pose_image, canny_image],
         generator=generator,
         num_images_per_prompt=3,
         num_inference_steps=20,
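The inference call now names every argument and passes both conditioning images. A minimal sketch of the same call, reusing `pipe` from the sketch above; the image loading here is illustrative:

```python
# Sketch of the updated inference call (argument names follow the diff).
# `image` takes one conditioning image per ControlNet, in the same order the
# ControlNets were given to the pipeline: [pose, canny].
import torch
from diffusers.utils import load_image

pose_image = load_image("p11_clip.png")   # openpose conditioning (bundled example)
canny_image = load_image("c11_clip.png")  # canny-edge conditioning (bundled example)

negative_prompt = ("worst quality, low quality, lowres, bad anatomy, bad hands, "
                   "missing fingers, extra digit, fewer digits")
generator = torch.manual_seed(0)  # fixed seed, so example outputs are reproducible

output = pipe(
    prompt="masterpiece, a professional portrait of woman wearing white shirts",
    negative_prompt=negative_prompt,
    image=[pose_image, canny_image],
    generator=generator,
    num_images_per_prompt=3,
    num_inference_steps=20,
)
images = output.images  # three PIL images for the gallery
```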
@@ -53,16 +71,23 @@ gr.Interface(
         gr.Textbox(
             label="Enter your prompt",
             max_lines=1,
-            placeholder="
+            placeholder="Enter your prompt",
         ),
     ],
     outputs=gr.Gallery().style(grid=[2], height="auto"),
-
-    description="This Space uses pose lines and canny edged image as the additional conditioning. Please refer to the \"Examples\" for what kind of images are appropriate.",
+    description=markdown,
     examples=[
-        ["
-        "
-        "masterpiece, a professional portrait of woman wearing white shirts"
+        ["p11_clip.png",
+         "c11_clip.png",
+         "masterpiece, a professional portrait of woman wearing white shirts, smile, heart hand sign"
+        ],
+        # ["p13_clip.png",
+        #  "c13_clip.png",
+        #  "masterpiece, a professional portrait of woman wearing white shirts, smile"
+        # ],
+        ["p12_clip.png",
+         "c12_clip.png",
+         "masterpiece, a professional portrait of woman wearing white shirts, smile"
         ],
     ],
     allow_flagging=False,
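Put together, the Interface wiring after this commit plausibly reads as below (Gradio 3.x API, matching the hunk's `.style()` call). The two image inputs never appear in the diff, so their definitions here are assumptions:

```python
# Sketch of the Interface after this commit (Gradio 3.x API; newer Gradio
# replaces .style() with constructor arguments). The two gr.Image inputs are
# assumed -- the diff never shows them -- the rest mirrors the hunk above.
import gradio as gr

gr.Interface(
    fn=generate_images,
    inputs=[
        gr.Image(type="pil", label="Pose image"),   # assumed input definition
        gr.Image(type="pil", label="Canny image"),  # assumed input definition
        gr.Textbox(
            label="Enter your prompt",
            max_lines=1,
            placeholder="Enter your prompt",
        ),
    ],
    outputs=gr.Gallery().style(grid=[2], height="auto"),
    description=markdown,
    examples=[
        ["p11_clip.png", "c11_clip.png",
         "masterpiece, a professional portrait of woman wearing white shirts, smile, heart hand sign"],
        ["p12_clip.png", "c12_clip.png",
         "masterpiece, a professional portrait of woman wearing white shirts, smile"],
    ],
    allow_flagging=False,
).launch()
```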
c2_clip.png → c11_clip.png
RENAMED
File without changes

p2_clip.png → c12_clip.png
RENAMED
File without changes

c13_clip.png
ADDED
Git LFS Details

p11_clip.png
ADDED
Git LFS Details

p12_clip.png
ADDED
Git LFS Details

p13_clip.png
ADDED
Git LFS Details
requirements.txt
CHANGED
@@ -1,4 +1,4 @@
-git+https://github.com/
+git+https://github.com/huggingface/diffusers
 transformers
 git+https://github.com/huggingface/accelerate
 torch==1.13.1
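The fix restores the truncated first line so diffusers installs from source, presumably to pick up ControlNet support that had not yet reached a stable release at the time. A small illustrative check that the installed build exposes what app.py imports:

```python
# Illustrative sanity check that the source-installed diffusers build exposes
# the classes app.py depends on (these names shipped with ControlNet support).
import diffusers

print("diffusers", diffusers.__version__)
for name in ("ControlNetModel", "StableDiffusionControlNetPipeline",
             "UniPCMultistepScheduler"):
    assert hasattr(diffusers, name), f"missing {name}"
```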