YiYiXu committed
Commit d19a853
1 Parent(s): 6f4bd66

add diffusers weights + code example

Files changed (3):
  1. README.md +46 -0
  2. config.json +21 -0
  3. diffusion_pytorch_model.safetensors +3 -0
README.md CHANGED
@@ -91,6 +91,52 @@ Which should give you an image like below:
 
 ![An adorable fluffy pastel creature](sample_result.png)
 
+### Using ControlNets in Diffusers
+
+Make sure you upgrade to the latest version of `diffusers`: `pip install -U diffusers`. Then you can run:
+
+```python
+import torch
+from diffusers import StableDiffusion3ControlNetPipeline, SD3ControlNetModel
+from diffusers.utils import load_image
+from diffusers.image_processor import VaeImageProcessor
+
+
+class SD3CannyImageProcessor(VaeImageProcessor):
+    def __init__(self):
+        # Skip the default [-1, 1] normalization; this checkpoint takes the
+        # control image in a different value range (see preprocess below).
+        super().__init__(do_normalize=False)
+
+    def preprocess(self, image, **kwargs):
+        # Rescale the [0, 1] control image into the range this checkpoint expects.
+        image = super().preprocess(image, **kwargs)
+        image = image * 255 * 0.5 + 0.5
+        return image
+
+    def postprocess(self, image, do_denormalize=True, **kwargs):
+        # Force denormalization for every image in the batch so the
+        # generated outputs render as ordinary images.
+        do_denormalize = [True] * image.shape[0]
+        image = super().postprocess(image, **kwargs, do_denormalize=do_denormalize)
+        return image
+
+
+controlnet = SD3ControlNetModel.from_pretrained("stabilityai/stable-diffusion-3.5-large-controlnet-canny", torch_dtype=torch.float16)
+pipe = StableDiffusion3ControlNetPipeline.from_pretrained(
+    "stabilityai/stable-diffusion-3.5-large",
+    controlnet=controlnet,
+    torch_dtype=torch.float16,
+).to("cuda")
+pipe.image_processor = SD3CannyImageProcessor()
+
+control_image = load_image("https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/canny.png")
+prompt = "A Night time photo taken by Leica M11, portrait of a Japanese woman in a kimono, looking at the camera, Cherry blossoms"
+
+generator = torch.Generator(device="cpu").manual_seed(0)
+image = pipe(
+    prompt,
+    control_image=control_image,
+    controlnet_conditioning_scale=1.0,
+    guidance_scale=3.5,
+    num_inference_steps=60,
+    generator=generator,
+    max_sequence_length=77,
+).images[0]
+image.save("canny-8b.jpg")
+```
 
 ### Preprocessing
 
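Note on the example above: the custom `SD3CannyImageProcessor` is what adapts the pipeline to this checkpoint. `do_normalize=False` bypasses the pipeline's default [-1, 1] normalization, `preprocess` rescales the [0, 1] control image into the larger value range this canny ControlNet consumes, and `postprocess` forces denormalization so the generated batch renders correctly.
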
config.json ADDED
@@ -0,0 +1,21 @@
+{
+  "_class_name": "SD3ControlNetModel",
+  "_diffusers_version": "0.32.0.dev0",
+  "attention_head_dim": 64,
+  "caption_projection_dim": 2048,
+  "dual_attention_layers": [],
+  "extra_conditioning_channels": 0,
+  "force_zeros_for_pooled_projection": false,
+  "in_channels": 16,
+  "joint_attention_dim": null,
+  "num_attention_heads": 38,
+  "num_layers": 19,
+  "out_channels": 16,
+  "patch_size": 2,
+  "pooled_projection_dim": 2048,
+  "pos_embed_max_size": null,
+  "pos_embed_type": null,
+  "qk_norm": null,
+  "sample_size": 128,
+  "use_pos_embed": false
+}
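
For a quick check that the weights load against this config, the model can be inspected in diffusers. A minimal sketch; the repo id is taken from the README example above:

```python
import torch
from diffusers import SD3ControlNetModel

# Load the ControlNet added in this commit; config.json above is read automatically.
controlnet = SD3ControlNetModel.from_pretrained(
    "stabilityai/stable-diffusion-3.5-large-controlnet-canny",
    torch_dtype=torch.float16,
)

# These mirror config.json: 19 transformer blocks, 38 heads of 64 dims each
# (an inner dimension of 38 * 64 = 2432).
print(controlnet.config.num_layers)           # 19
print(controlnet.config.num_attention_heads)  # 38
print(controlnet.config.attention_head_dim)   # 64
```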
diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f2608a8af55f223250398e04e3c497aa194ddbcb70049f556194c4a2d333865
+size 8614110992
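
The weights file is stored through Git LFS, so the commit records only a pointer: the object's SHA-256 and its size (8,614,110,992 bytes, about 8.6 GB). A minimal sketch for verifying a downloaded copy against this pointer, assuming the file sits in the current directory:

```python
import hashlib

def verify_lfs_object(path, expected_sha256, expected_size):
    """Stream the file and check it against the LFS pointer's oid and size."""
    h = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
            size += len(chunk)
    return size == expected_size and h.hexdigest() == expected_sha256

# Values copied from the pointer file above.
ok = verify_lfs_object(
    "diffusion_pytorch_model.safetensors",
    "5f2608a8af55f223250398e04e3c497aa194ddbcb70049f556194c4a2d333865",
    8614110992,
)
print("weights match LFS pointer:", ok)
```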