clementchadebec
committed on
Update README.md
README.md CHANGED
@@ -98,6 +98,69 @@ image = pipe(
 <img style="width:400px;" src="images/corgi.jpg">
 </p>
 
+# Combining Flash Diffusion with Existing ControlNets 🎨
+
+FlashSDXL can also be combined with existing ControlNets to unlock few-step generation in a **training-free** manner. It can be integrated directly into Hugging Face pipelines. See the example below.
+
+```python
+import torch
+import cv2
+import numpy as np
+from PIL import Image
+
+from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, LCMScheduler
+from diffusers.utils import load_image, make_image_grid
+
+adapter_id = "jasperai/flash-sdxl"
+
+image = load_image(
+    "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
+).resize((1024, 1024))
+
+image = np.array(image)
+
+# Extract Canny edges and stack them into a 3-channel conditioning image
+image = cv2.Canny(image, 100, 200)
+image = image[:, :, None].repeat(3, 2)
+canny_image = Image.fromarray(image)
+
+# Load ControlNet
+controlnet = ControlNetModel.from_pretrained(
+    "diffusers/controlnet-canny-sdxl-1.0",
+    torch_dtype=torch.float16,
+    variant="fp16"
+)
+pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
+    "stabilityai/stable-diffusion-xl-base-1.0",
+    controlnet=controlnet,
+    torch_dtype=torch.float16,
+    safety_checker=None,
+    variant="fp16"
+).to("cuda")
+
+# Set scheduler
+pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
+
+# Load and fuse the LoRA weights
+pipe.load_lora_weights(adapter_id)
+pipe.fuse_lora()
+
+generator = torch.manual_seed(0)
+image = pipe(
+    "picture of the mona lisa",
+    image=canny_image,
+    num_inference_steps=4,
+    guidance_scale=0,  # classifier-free guidance is disabled for few-step sampling
+    controlnet_conditioning_scale=0.5,
+    cross_attention_kwargs={"scale": 1},
+    generator=generator,
+).images[0]
+make_image_grid([canny_image, image], rows=1, cols=2)
+```
+<p align="center">
+<img style="width:400px;" src="images/controlnet.jpg">
+</p>
+
+
 # Training Details
 The model was trained for 20k iterations on 4 H100 GPUs (approximately 176 GPU hours of training in total, i.e., about 44 hours of wall-clock time). Please refer to the [paper](http://arxiv.org/abs/2406.02347) for further parameter details.
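One non-obvious step in the added snippet is the conversion of the Canny output into a conditioning image: `cv2.Canny` returns a single-channel edge map, and `image[:, :, None].repeat(3, 2)` stacks it into three identical channels so downstream code can treat it as an RGB image. A minimal sketch of just that shape manipulation, using a dummy array (the `(1024, 1024)` size simply mirrors the resize in the commit; the array contents are illustrative):

```python
import numpy as np

# cv2.Canny returns a single-channel uint8 edge map of shape (H, W).
# A zero array stands in for it here.
edges = np.zeros((1024, 1024), dtype=np.uint8)

# Add a trailing channel axis -> (H, W, 1), then repeat it 3 times
# along axis 2 -> (H, W, 3), so PIL.Image.fromarray interprets the
# edge map as an RGB image the ControlNet pipeline can consume.
rgb = edges[:, :, None].repeat(3, 2)
assert rgb.shape == (1024, 1024, 3)
```

An equivalent, arguably more explicit spelling of the same operation is `np.stack([edges] * 3, axis=-1)`.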