AlekseyCalvin committed on
Commit 376a57e
1 Parent(s): 1743227

Upload 6 files

Files changed (6)
  1. custom_pipeline.py +210 -0
  2. env (1).py +98 -0
  3. live_preview_helpers (2).py +166 -0
  4. mod (1).py +360 -0
  5. open_flux.py +222 -0
  6. pipeline (2).py +796 -0
custom_pipeline.py ADDED
@@ -0,0 +1,210 @@
1
+ import torch
2
+ import numpy as np
3
+ from diffusers import FlowMatchEulerDiscreteScheduler
4
+ from diffusers import FluxPipeline
5
+ from diffusers.pipelines.flux.pipeline_output import FluxPipelineOutput
6
+ from typing import Any, Callable, Dict, List, Optional, Union
7
+ from PIL import Image
8
+ from diffusers.pipelines.flux.pipeline_flux import calculate_shift, retrieve_timesteps
9
+
10
+ from diffusers.utils import is_torch_xla_available
11
+
12
+ if is_torch_xla_available():
13
+ import torch_xla.core.xla_model as xm
14
+
15
+ XLA_AVAILABLE = True
16
+ else:
17
+ XLA_AVAILABLE = False
18
+
19
+
20
+ # Constants for shift calculation
21
+ BASE_SEQ_LEN = 256
22
+ MAX_SEQ_LEN = 4096
23
+ BASE_SHIFT = 0.5
24
+ MAX_SHIFT = 1.2
25
+
26
+ # Helper functions
27
+ def calculate_timestep_shift(image_seq_len: int) -> float:
28
+ """Calculates the timestep shift (mu) based on the image sequence length."""
29
+ m = (MAX_SHIFT - BASE_SHIFT) / (MAX_SEQ_LEN - BASE_SEQ_LEN)
30
+ b = BASE_SHIFT - m * BASE_SEQ_LEN
31
+ mu = image_seq_len * m + b
32
+ return mu
33
+
34
+ def prepare_timesteps(
35
+ scheduler: FlowMatchEulerDiscreteScheduler,
36
+ num_inference_steps: Optional[int] = None,
37
+ device: Optional[Union[str, torch.device]] = None,
38
+ timesteps: Optional[List[int]] = None,
39
+ sigmas: Optional[List[float]] = None,
40
+ mu: Optional[float] = None,
41
+ ) -> (torch.Tensor, int):
42
+ """Prepares the timesteps for the diffusion process."""
43
+ if timesteps is not None and sigmas is not None:
44
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed.")
45
+
46
+ if timesteps is not None:
47
+ scheduler.set_timesteps(timesteps=timesteps, device=device)
48
+ elif sigmas is not None:
49
+ scheduler.set_timesteps(sigmas=sigmas, device=device, mu=mu)
50
+ else:
51
+ scheduler.set_timesteps(num_inference_steps, device=device, mu=mu)
52
+
53
+ timesteps = scheduler.timesteps
54
+ num_inference_steps = len(timesteps)
55
+ return timesteps, num_inference_steps
56
+
57
+ # FLUX pipeline function
58
+ class FluxWithCFGPipeline(FluxPipeline):
59
+
60
+ @torch.inference_mode()
61
+ def generate_image(
62
+ self,
63
+ prompt: Union[str, List[str]] = None,
64
+ prompt_2: Optional[Union[str, List[str]]] = None,
65
+ height: Optional[int] = None,
66
+ width: Optional[int] = None,
67
+ negative_prompt: Optional[Union[str, List[str]]] = None,
68
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
69
+ num_inference_steps: int = 4,
70
+ timesteps: List[int] = None,
71
+ guidance_scale: float = 3.5,
72
+ num_images_per_prompt: Optional[int] = 1,
73
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
74
+ latents: Optional[torch.FloatTensor] = None,
75
+ prompt_embeds: Optional[torch.FloatTensor] = None,
76
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
77
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
78
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
79
+ output_type: Optional[str] = "pil",
80
+ return_dict: bool = True,
81
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
82
+ max_sequence_length: int = 300,
83
+ ):
84
+ height = height or self.default_sample_size * self.vae_scale_factor
85
+ width = width or self.default_sample_size * self.vae_scale_factor
86
+
87
+ # 1. Check inputs
88
+ # FluxPipeline.check_inputs expects height/width directly after prompt_2
+ self.check_inputs(
+ prompt,
+ prompt_2,
+ height,
+ width,
+ prompt_embeds=prompt_embeds,
+ pooled_prompt_embeds=pooled_prompt_embeds,
+ max_sequence_length=max_sequence_length,
+ )
98
+
99
+ self._guidance_scale = guidance_scale
100
+ self._joint_attention_kwargs = joint_attention_kwargs
101
+ self._interrupt = False
102
+
103
+ # 2. Define call parameters
104
+ batch_size = 1 if isinstance(prompt, str) else len(prompt)
105
+ device = "cuda" if torch.cuda.is_available() else "cpu"
106
+
107
+ # 3. Encode prompt
108
+ lora_scale = joint_attention_kwargs.get("scale", None) if joint_attention_kwargs is not None else None
109
+ prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt(
110
+ prompt=prompt,
111
+ prompt_2=prompt_2,
112
+ prompt_embeds=prompt_embeds,
113
+ pooled_prompt_embeds=pooled_prompt_embeds,
114
+ device=device,
115
+ num_images_per_prompt=num_images_per_prompt,
116
+ max_sequence_length=max_sequence_length,
117
+ lora_scale=lora_scale,
118
+ )
119
+ negative_prompt_embeds, negative_pooled_prompt_embeds, negative_text_ids = self.encode_prompt(
120
+ prompt=negative_prompt,
121
+ prompt_2=negative_prompt_2,
122
+ prompt_embeds=negative_prompt_embeds,
123
+ pooled_prompt_embeds=negative_pooled_prompt_embeds,
124
+ device=device,
125
+ num_images_per_prompt=num_images_per_prompt,
126
+ max_sequence_length=max_sequence_length,
127
+ lora_scale=lora_scale,
128
+ )
129
+
130
+ # 4. Prepare latent variables
131
+ num_channels_latents = self.transformer.config.in_channels // 4
132
+ latents, latent_image_ids = self.prepare_latents(
133
+ batch_size * num_images_per_prompt,
134
+ num_channels_latents,
135
+ height,
136
+ width,
137
+ prompt_embeds.dtype,
+ device,
140
+ generator,
141
+ latents,
142
+ )
143
+
144
+ # 5. Prepare timesteps
145
+ sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
146
+ image_seq_len = latents.shape[1]
147
+ mu = calculate_timestep_shift(image_seq_len)
148
+ timesteps, num_inference_steps = prepare_timesteps(
149
+ self.scheduler,
150
+ num_inference_steps,
151
+ device,
152
+ timesteps,
153
+ sigmas,
154
+ mu=mu,
155
+ )
156
+ self._num_timesteps = len(timesteps)
157
+
158
+ # Handle guidance
159
+ guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float16).expand(latents.shape[0]) if self.transformer.config.guidance_embeds else None
160
+
161
+ # 6. Denoising loop
162
+ for i, t in enumerate(timesteps):
163
+ if self.interrupt:
164
+ continue
165
+
166
+ timestep = t.expand(latents.shape[0]).to(latents.dtype)
167
+
168
+ noise_pred_text = self.transformer(
169
+ hidden_states=latents,
170
+ timestep=timestep / 1000,
171
+ guidance=guidance,
172
+ pooled_projections=pooled_prompt_embeds,
173
+ encoder_hidden_states=prompt_embeds,
174
+ txt_ids=text_ids,
175
+ img_ids=latent_image_ids,
176
+ joint_attention_kwargs=self.joint_attention_kwargs,
177
+ return_dict=False,
178
+ )[0]
179
+
180
+ noise_pred_uncond = self.transformer(
181
+ hidden_states=latents,
182
+ timestep=timestep / 1000,
183
+ guidance=guidance,
184
+ pooled_projections=negative_pooled_prompt_embeds,
185
+ encoder_hidden_states=negative_prompt_embeds,
186
+ txt_ids=negative_text_ids,
187
+ img_ids=latent_image_ids,
188
+ joint_attention_kwargs=self.joint_attention_kwargs,
189
+ return_dict=False,
190
+ )[0]
191
+
192
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
193
+
194
+ latents_dtype = latents.dtype
195
+ latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
196
+ # Yield intermediate result
197
+ torch.cuda.empty_cache()
198
+
199
+ # Final image: decode first, then free hooks and cached memory before returning
+ image = self._decode_latents_to_image(latents, height, width, output_type)
+ self.maybe_free_model_hooks()
+ torch.cuda.empty_cache()
+ return image
203
+
204
+ def _decode_latents_to_image(self, latents, height, width, output_type, vae=None):
205
+ """Decodes the given latents into an image."""
206
+ vae = vae or self.vae
207
+ latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
208
+ latents = (latents / vae.config.scaling_factor) + vae.config.shift_factor
209
+ image = vae.decode(latents, return_dict=False)[0]
210
+ return self.image_processor.postprocess(image, output_type=output_type)[0]
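For orientation, here is a minimal usage sketch of the `generate_image` entry point defined above. The checkpoint id, dtype, and device are illustrative assumptions, not part of this commit.

```python
# Hypothetical usage sketch of custom_pipeline.FluxWithCFGPipeline (assumed checkpoint, dtype, device).
import torch
from custom_pipeline import FluxWithCFGPipeline

pipe = FluxWithCFGPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell",  # assumed FLUX checkpoint (also listed in env (1).py)
    torch_dtype=torch.bfloat16,
).to("cuda")

image = pipe.generate_image(
    prompt="a watercolor fox in a birch forest",
    negative_prompt="blurry, low quality",
    num_inference_steps=4,
    guidance_scale=3.5,
)  # returns a single PIL image via _decode_latents_to_image
image.save("fox.png")
```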
env (1).py ADDED
@@ -0,0 +1,98 @@
1
+ import os
2
+
3
+
4
+ CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
5
+ HF_TOKEN = os.environ.get("HF_TOKEN")
6
+ hf_read_token = os.environ.get('HF_READ_TOKEN') # used only for private repos
7
+
8
+
9
+ num_loras = 3
10
+ num_cns = 2
11
+
12
+
13
+ models = [
14
+ "camenduru/FLUX.1-dev-diffusers",
15
+ "black-forest-labs/FLUX.1-schnell",
16
+ "sayakpaul/FLUX.1-merged",
17
+ "ostris/OpenFLUX.1",
18
+ "multimodalart/FLUX.1-dev2pro-full",
19
+ "John6666/flux1-dev-minus-v1-fp8-flux",
20
+ "John6666/hyper-flux1-dev-fp8-flux",
21
+ "John6666/blue-pencil-flux1-v021-fp8-flux",
22
+ "Raelina/Raemu-Flux",
23
+ "John6666/raemu-flux-v10-fp8-flux",
24
+ "John6666/copycat-flux-test-fp8-v11-fp8-flux",
25
+ "John6666/wai-ani-flux-v10forfp8-fp8-flux",
26
+ "John6666/flux-dev8-anime-nsfw-fp8-flux",
27
+ "John6666/nepotism-fuxdevschnell-v3aio-fp8-flux",
28
+ "John6666/sumeshi-flux1s-v002e-fp8-flux",
29
+ "John6666/fca-style-v33-x10-8step-fp8-flux",
30
+ "John6666/lyh-anime-v10f1-fp8-flux",
31
+ "John6666/lyh-dalle-anime-v12dalle-fp8-flux",
32
+ "John6666/lyh-anime-flux-v2a1-fp8-flux",
33
+ "John6666/glimmerkin-flux-cute-v10-fp8-flux",
34
+ "John6666/niji-style-flux-devfp8-fp8-flux",
35
+ "John6666/niji56-style-v3-fp8-flux",
36
+ "John6666/xe-anime-flux-v04-fp8-flux",
37
+ "John6666/xe-figure-flux-01-fp8-flux",
38
+ "John6666/xe-pixel-flux-01-fp8-flux",
39
+ "John6666/xe-guoman-flux-02-fp8-flux",
40
+ "John6666/carnival-unchained-v10-fp8-flux",
41
+ "John6666/real-flux-10b-schnell-fp8-flux",
42
+ "John6666/fluxunchained-artfulnsfw-fut516xfp8e4m3fnv11-fp8-flux",
43
+ "John6666/fastflux-unchained-t5f16-fp8-flux",
44
+ "John6666/iniverse-mix-xl-sfwnsfw-fluxdfp16nsfwv11-fp8-flux",
45
+ "John6666/nsfw-master-flux-lora-merged-with-flux1-dev-fp16-v10-fp8-flux",
46
+ "John6666/the-araminta-flux1a1-fp8-flux",
47
+ "John6666/acorn-is-spinning-flux-v11-fp8-flux",
48
+ "John6666/stoiqo-afrodite-fluxxl-f1dalpha-fp8-flux",
49
+ "John6666/real-horny-pro-fp8-flux",
50
+ "John6666/centerfold-flux-v20fp8e5m2-fp8-flux",
51
+ "John6666/jib-mix-flux-v208stephyper-fp8-flux",
52
+ "John6666/sapianf-nude-men-women-for-flux-v20fp16-fp8-flux",
53
+ "John6666/flux-asian-realistic-v10-fp8-flux",
54
+ "John6666/fluxasiandoll-v10-fp8-flux",
55
+ "John6666/xe-asian-flux-01-fp8-flux",
56
+ "John6666/fluxescore-dev-v10fp16-fp8-flux",
57
+ # "",
58
+ ]
59
+
60
+ model_trigger = {
61
+ "Raelina/Raemu-Flux": "anime",
62
+ "John6666/raemu-flux-v10-fp8-flux": "anime",
63
+ "John6666/fca-style-v33-x10-8step-fp8-flux": "fca_style",
64
+ }
65
+
66
+ # List all Models for specified user
67
+ HF_MODEL_USER_LIKES = [] # sorted by number of likes
68
+ HF_MODEL_USER_EX = [] # sorted by a special rule
69
+
70
+
71
+
72
+ # - **Download Models**
73
+ download_model_list = [
74
+ ]
75
+
76
+ # - **Download VAEs**
77
+ download_vae_list = [
78
+ ]
79
+
80
+ # - **Download LoRAs**
81
+ download_lora_list = [
82
+ ]
83
+
84
+ DIFFUSERS_FORMAT_LORAS = []
85
+
86
+ directory_models = 'models'
87
+ os.makedirs(directory_models, exist_ok=True)
88
+ directory_loras = 'loras'
89
+ os.makedirs(directory_loras, exist_ok=True)
90
+ directory_vaes = 'vaes'
91
+ os.makedirs(directory_vaes, exist_ok=True)
92
+
93
+
94
+ HF_LORA_PRIVATE_REPOS1 = []
95
+ HF_LORA_PRIVATE_REPOS2 = [] # to be sorted as 1 repo
96
+ HF_LORA_PRIVATE_REPOS = HF_LORA_PRIVATE_REPOS1 + HF_LORA_PRIVATE_REPOS2
97
+ HF_LORA_ESSENTIAL_PRIVATE_REPO = '' # to be downloaded on run app
98
+ HF_VAE_PRIVATE_REPO = ''
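A small, hypothetical consumer of the constants above (assuming the file is saved as `env.py`, which is how `mod (1).py` imports it):

```python
# Illustrative lookup of per-model trigger words (not part of this commit).
from env import models, model_trigger

def trigger_for(model_name: str) -> str:
    """Return the trigger word registered for a model, or '' if none."""
    return model_trigger.get(model_name, "")

for name in models[:3]:
    print(name, "->", trigger_for(name) or "(no trigger)")
```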
live_preview_helpers (2).py ADDED
@@ -0,0 +1,166 @@
1
+ import torch
2
+ import numpy as np
3
+ from diffusers import FluxPipeline, AutoencoderTiny, FlowMatchEulerDiscreteScheduler
4
+ from typing import Any, Dict, List, Optional, Union
5
+
6
+ # Helper functions
7
+ def calculate_shift(
8
+ image_seq_len,
9
+ base_seq_len: int = 256,
10
+ max_seq_len: int = 4096,
11
+ base_shift: float = 0.5,
12
+ max_shift: float = 1.16,
13
+ ):
14
+ m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
15
+ b = base_shift - m * base_seq_len
16
+ mu = image_seq_len * m + b
17
+ return mu
18
+
19
+ def retrieve_timesteps(
20
+ scheduler,
21
+ num_inference_steps: Optional[int] = None,
22
+ device: Optional[Union[str, torch.device]] = None,
23
+ timesteps: Optional[List[int]] = None,
24
+ sigmas: Optional[List[float]] = None,
25
+ **kwargs,
26
+ ):
27
+ if timesteps is not None and sigmas is not None:
28
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
29
+ if timesteps is not None:
30
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
31
+ timesteps = scheduler.timesteps
32
+ num_inference_steps = len(timesteps)
33
+ elif sigmas is not None:
34
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
35
+ timesteps = scheduler.timesteps
36
+ num_inference_steps = len(timesteps)
37
+ else:
38
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
39
+ timesteps = scheduler.timesteps
40
+ return timesteps, num_inference_steps
41
+
42
+ # FLUX pipeline function
43
+ @torch.inference_mode()
44
+ def flux_pipe_call_that_returns_an_iterable_of_images(
45
+ self,
46
+ prompt: Union[str, List[str]] = None,
47
+ prompt_2: Optional[Union[str, List[str]]] = None,
48
+ height: Optional[int] = None,
49
+ width: Optional[int] = None,
50
+ num_inference_steps: int = 28,
51
+ timesteps: List[int] = None,
52
+ guidance_scale: float = 3.5,
53
+ num_images_per_prompt: Optional[int] = 1,
54
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
55
+ latents: Optional[torch.FloatTensor] = None,
56
+ prompt_embeds: Optional[torch.FloatTensor] = None,
57
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
58
+ output_type: Optional[str] = "pil",
59
+ return_dict: bool = True,
60
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
61
+ max_sequence_length: int = 512,
62
+ good_vae: Optional[Any] = None,
63
+ ):
64
+ height = height or self.default_sample_size * self.vae_scale_factor
65
+ width = width or self.default_sample_size * self.vae_scale_factor
66
+
67
+ # 1. Check inputs
68
+ self.check_inputs(
69
+ prompt,
70
+ prompt_2,
71
+ height,
72
+ width,
73
+ prompt_embeds=prompt_embeds,
74
+ pooled_prompt_embeds=pooled_prompt_embeds,
75
+ max_sequence_length=max_sequence_length,
76
+ )
77
+
78
+ self._guidance_scale = guidance_scale
79
+ self._joint_attention_kwargs = joint_attention_kwargs
80
+ self._interrupt = False
81
+
82
+ # 2. Define call parameters
83
+ batch_size = 1 if isinstance(prompt, str) else len(prompt)
84
+ device = self._execution_device
85
+
86
+ # 3. Encode prompt
87
+ lora_scale = joint_attention_kwargs.get("scale", None) if joint_attention_kwargs is not None else None
88
+ prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt(
89
+ prompt=prompt,
90
+ prompt_2=prompt_2,
91
+ prompt_embeds=prompt_embeds,
92
+ pooled_prompt_embeds=pooled_prompt_embeds,
93
+ device=device,
94
+ num_images_per_prompt=num_images_per_prompt,
95
+ max_sequence_length=max_sequence_length,
96
+ lora_scale=lora_scale,
97
+ )
98
+ # 4. Prepare latent variables
99
+ num_channels_latents = self.transformer.config.in_channels // 4
100
+ latents, latent_image_ids = self.prepare_latents(
101
+ batch_size * num_images_per_prompt,
102
+ num_channels_latents,
103
+ height,
104
+ width,
105
+ prompt_embeds.dtype,
106
+ device,
107
+ generator,
108
+ latents,
109
+ )
110
+ # 5. Prepare timesteps
111
+ sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
112
+ image_seq_len = latents.shape[1]
113
+ mu = calculate_shift(
114
+ image_seq_len,
115
+ self.scheduler.config.base_image_seq_len,
116
+ self.scheduler.config.max_image_seq_len,
117
+ self.scheduler.config.base_shift,
118
+ self.scheduler.config.max_shift,
119
+ )
120
+ timesteps, num_inference_steps = retrieve_timesteps(
121
+ self.scheduler,
122
+ num_inference_steps,
123
+ device,
124
+ timesteps,
125
+ sigmas,
126
+ mu=mu,
127
+ )
128
+ self._num_timesteps = len(timesteps)
129
+
130
+ # Handle guidance
131
+ guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32).expand(latents.shape[0]) if self.transformer.config.guidance_embeds else None
132
+
133
+ # 6. Denoising loop
134
+ for i, t in enumerate(timesteps):
135
+ if self.interrupt:
136
+ continue
137
+
138
+ timestep = t.expand(latents.shape[0]).to(latents.dtype)
139
+
140
+ noise_pred = self.transformer(
141
+ hidden_states=latents,
142
+ timestep=timestep / 1000,
143
+ guidance=guidance,
144
+ pooled_projections=pooled_prompt_embeds,
145
+ encoder_hidden_states=prompt_embeds,
146
+ txt_ids=text_ids,
147
+ img_ids=latent_image_ids,
148
+ joint_attention_kwargs=self.joint_attention_kwargs,
149
+ return_dict=False,
150
+ )[0]
151
+ # Yield intermediate result
152
+ latents_for_image = self._unpack_latents(latents, height, width, self.vae_scale_factor)
153
+ latents_for_image = (latents_for_image / self.vae.config.scaling_factor) + self.vae.config.shift_factor
154
+ image = self.vae.decode(latents_for_image, return_dict=False)[0]
155
+ yield self.image_processor.postprocess(image, output_type=output_type)[0]
156
+
157
+ latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
158
+ torch.cuda.empty_cache()
159
+
160
+ # Final image using good_vae
161
+ latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
162
+ latents = (latents / good_vae.config.scaling_factor) + good_vae.config.shift_factor
163
+ image = good_vae.decode(latents, return_dict=False)[0]
164
+ self.maybe_free_model_hooks()
165
+ torch.cuda.empty_cache()
166
+ yield self.image_processor.postprocess(image, output_type=output_type)[0]
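To clarify the intended wiring, a hedged sketch: the generator above is bound onto a `FluxPipeline` instance, `self.vae` is swapped for a small preview autoencoder, and the original full VAE is passed as `good_vae` for the final frame. The repo ids, dtype, and device are assumptions.

```python
# Hypothetical wiring of flux_pipe_call_that_returns_an_iterable_of_images (assumed repo ids, dtype, device).
import torch
from diffusers import FluxPipeline, AutoencoderTiny
from live_preview_helpers import flux_pipe_call_that_returns_an_iterable_of_images

dtype = torch.bfloat16
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype).to("cuda")
good_vae = pipe.vae  # keep the full-quality VAE for the final image
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to("cuda")  # assumed tiny preview VAE

# Bind the module-level function as a method so `self` resolves to the pipeline.
pipe.flux_pipe_call_that_returns_an_iterable_of_images = (
    flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
)

for step, frame in enumerate(
    pipe.flux_pipe_call_that_returns_an_iterable_of_images(
        prompt="a lighthouse at dawn", num_inference_steps=8, good_vae=good_vae
    )
):
    frame.save(f"preview_{step:02d}.png")  # each yielded frame is a PIL image
```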
mod (1).py ADDED
@@ -0,0 +1,360 @@
1
+ import spaces
2
+ import gradio as gr
3
+ import torch
4
+ from PIL import Image
5
+ from pathlib import Path
6
+ import gc
7
+ import subprocess
8
+ from env import num_cns, model_trigger
9
+
10
+
11
+ subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
12
+ subprocess.run('pip cache purge', shell=True)
13
+ device = "cuda" if torch.cuda.is_available() else "cpu"
14
+ torch.set_grad_enabled(False)
15
+
16
+
17
+ control_images = [None] * num_cns
18
+ control_modes = [-1] * num_cns
19
+ control_scales = [0] * num_cns
20
+
21
+
22
+ def is_repo_name(s):
23
+ import re
24
+ return re.fullmatch(r'^[^/,\s\"\']+/[^/,\s\"\']+$', s)
25
+
26
+
27
+ def is_repo_exists(repo_id):
28
+ from huggingface_hub import HfApi
29
+ api = HfApi()
30
+ try:
31
+ if api.repo_exists(repo_id=repo_id): return True
32
+ else: return False
33
+ except Exception as e:
34
+ print(f"Error: Failed to connect {repo_id}.")
35
+ print(e)
36
+ return True # for safe
37
+
38
+
39
+ from translatepy import Translator
40
+ translator = Translator()
41
+ def translate_to_en(input: str):
42
+ try:
43
+ output = str(translator.translate(input, 'English'))
44
+ except Exception as e:
45
+ output = input
46
+ print(e)
47
+ return output
48
+
49
+
50
+ def clear_cache():
51
+ try:
52
+ torch.cuda.empty_cache()
53
+ #torch.cuda.reset_max_memory_allocated()
54
+ #torch.cuda.reset_peak_memory_stats()
55
+ gc.collect()
56
+ except Exception as e:
57
+ print(e)
58
+ raise Exception(f"Cache clearing error: {e}") from e
59
+
60
+
61
+ def get_repo_safetensors(repo_id: str):
62
+ from huggingface_hub import HfApi
63
+ api = HfApi()
64
+ try:
65
+ if not is_repo_name(repo_id) or not is_repo_exists(repo_id): return gr.update(value="", choices=[])
66
+ files = api.list_repo_files(repo_id=repo_id)
67
+ except Exception as e:
68
+ print(f"Error: Failed to get {repo_id}'s info.")
69
+ print(e)
70
+ gr.Warning(f"Error: Failed to get {repo_id}'s info.")
71
+ return gr.update(choices=[])
72
+ files = [f for f in files if f.endswith(".safetensors")]
73
+ if len(files) == 0: return gr.update(value="", choices=[])
74
+ else: return gr.update(value=files[0], choices=files)
75
+
76
+
77
+ def expand2square(pil_img: Image.Image, background_color: tuple=(0, 0, 0)):
78
+ width, height = pil_img.size
79
+ if width == height:
80
+ return pil_img
81
+ elif width > height:
82
+ result = Image.new(pil_img.mode, (width, width), background_color)
83
+ result.paste(pil_img, (0, (width - height) // 2))
84
+ return result
85
+ else:
86
+ result = Image.new(pil_img.mode, (height, height), background_color)
87
+ result.paste(pil_img, ((height - width) // 2, 0))
88
+ return result
89
+
90
+
91
+ # https://huggingface.co/spaces/DamarJati/FLUX.1-DEV-Canny/blob/main/app.py
92
+ def resize_image(image, target_width, target_height, crop=True):
93
+ from image_datasets.canny_dataset import c_crop
94
+ if crop:
95
+ image = c_crop(image) # Crop the image to square
96
+ original_width, original_height = image.size
97
+
98
+ # Resize to match the target size without stretching
99
+ scale = max(target_width / original_width, target_height / original_height)
100
+ resized_width = int(scale * original_width)
101
+ resized_height = int(scale * original_height)
102
+
103
+ image = image.resize((resized_width, resized_height), Image.LANCZOS)
104
+
105
+ # Center crop to match the target dimensions
106
+ left = (resized_width - target_width) // 2
107
+ top = (resized_height - target_height) // 2
108
+ image = image.crop((left, top, left + target_width, top + target_height))
109
+ else:
110
+ image = image.resize((target_width, target_height), Image.LANCZOS)
111
+
112
+ return image
113
+
114
+
115
+ # https://huggingface.co/spaces/jiuface/FLUX.1-dev-Controlnet-Union/blob/main/app.py
116
+ # https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Union
117
+ controlnet_union_modes = {
118
+ "None": -1,
119
+ #"scribble_hed": 0,
120
+ "canny": 0, # supported
121
+ "mlsd": 0, #supported
122
+ "tile": 1, #supported
123
+ "depth_midas": 2, # supported
124
+ "blur": 3, # supported
125
+ "openpose": 4, # supported
126
+ "gray": 5, # supported
127
+ "low_quality": 6, # supported
128
+ }
129
+
130
+
131
+ # https://github.com/pytorch/pytorch/issues/123834
132
+ def get_control_params():
133
+ from diffusers.utils import load_image
134
+ modes = []
135
+ images = []
136
+ scales = []
137
+ for i, mode in enumerate(control_modes):
138
+ if mode == -1 or control_images[i] is None: continue
139
+ modes.append(control_modes[i])
140
+ images.append(load_image(control_images[i]))
141
+ scales.append(control_scales[i])
142
+ return modes, images, scales
143
+
144
+
145
+ from preprocessor import Preprocessor
146
+ def preprocess_image(image: Image.Image, control_mode: str, height: int, width: int,
147
+ preprocess_resolution: int):
148
+ if control_mode == "None": return image
149
+ image_resolution = max(width, height)
150
+ image_before = resize_image(expand2square(image.convert("RGB")), image_resolution, image_resolution, False)
151
+ # generated control_
152
+ print("start to generate control image")
153
+ preprocessor = Preprocessor()
154
+ if control_mode == "depth_midas":
155
+ preprocessor.load("Midas")
156
+ control_image = preprocessor(
157
+ image=image_before,
158
+ image_resolution=image_resolution,
159
+ detect_resolution=preprocess_resolution,
160
+ )
161
+ if control_mode == "openpose":
162
+ preprocessor.load("Openpose")
163
+ control_image = preprocessor(
164
+ image=image_before,
165
+ hand_and_face=True,
166
+ image_resolution=image_resolution,
167
+ detect_resolution=preprocess_resolution,
168
+ )
169
+ if control_mode == "canny":
170
+ preprocessor.load("Canny")
171
+ control_image = preprocessor(
172
+ image=image_before,
173
+ image_resolution=image_resolution,
174
+ detect_resolution=preprocess_resolution,
175
+ )
176
+
177
+ if control_mode == "mlsd":
178
+ preprocessor.load("MLSD")
179
+ control_image = preprocessor(
180
+ image=image_before,
181
+ image_resolution=image_resolution,
182
+ detect_resolution=preprocess_resolution,
183
+ )
184
+
185
+ if control_mode == "scribble_hed":
186
+ preprocessor.load("HED")
187
+ control_image = preprocessor(
188
+ image=image_before,
189
+ image_resolution=image_resolution,
190
+ detect_resolution=preprocess_resolution,
191
+ )
192
+
193
+ if control_mode == "low_quality" or control_mode == "gray" or control_mode == "blur" or control_mode == "tile":
194
+ control_image = image_before
195
+ image_width = 768
196
+ image_height = 768
197
+ else:
198
+ # make sure control image size is same as resized_image
199
+ image_width, image_height = control_image.size
200
+
201
+ image_after = resize_image(control_image, width, height, False)
202
+ ref_width, ref_height = image.size
203
+ print(f"generate control image success: {ref_width}x{ref_height} => {image_width}x{image_height}")
204
+ return image_after
205
+
206
+
207
+ def get_control_union_mode():
208
+ return list(controlnet_union_modes.keys())
209
+
210
+
211
+ def set_control_union_mode(i: int, mode: str, scale: str):
212
+ global control_modes
213
+ global control_scales
214
+ control_modes[i] = controlnet_union_modes.get(mode, 0)
215
+ control_scales[i] = scale
216
+ if mode != "None": return True
217
+ else: return gr.update(visible=True)
218
+
219
+
220
+ def set_control_union_image(i: int, mode: str, image: Image.Image | None, height: int, width: int, preprocess_resolution: int):
221
+ global control_images
222
+ if image is None: return None
223
+ control_images[i] = preprocess_image(image, mode, height, width, preprocess_resolution)
224
+ return control_images[i]
225
+
226
+
227
+ def preprocess_i2i_image(image_path: str, is_preprocess: bool, height: int, width: int):
228
+ try:
229
+ if not is_preprocess: return image_path
230
+ image_resolution = max(width, height)
231
+ image = Image.open(image_path)
232
+ image_resized = resize_image(expand2square(image.convert("RGB")), image_resolution, image_resolution, False)
233
+ image_resized.save(image_path)
234
+ except Exception as e:
235
+ raise gr.Error(f"Error: {e}")
236
+ return image_path
237
+
238
+
239
+ def compose_lora_json(lorajson: list[dict], i: int, name: str, scale: float, filename: str, trigger: str):
240
+ lorajson[i]["name"] = str(name) if name != "None" else ""
241
+ lorajson[i]["scale"] = float(scale)
242
+ lorajson[i]["filename"] = str(filename)
243
+ lorajson[i]["trigger"] = str(trigger)
244
+ return lorajson
245
+
246
+
247
+ def is_valid_lora(lorajson: list[dict]):
248
+ valid = False
249
+ for d in lorajson:
250
+ if "name" in d.keys() and d["name"] and d["name"] != "None": valid = True
251
+ return valid
252
+
253
+
254
+ def get_trigger_word(lorajson: list[dict]):
255
+ trigger = ""
256
+ for d in lorajson:
257
+ if "name" in d.keys() and d["name"] and d["name"] != "None" and d["trigger"]:
258
+ trigger += ", " + d["trigger"]
259
+ return trigger
260
+
261
+
262
+ def get_model_trigger(model_name: str):
263
+ trigger = ""
264
+ if model_name in model_trigger.keys(): trigger += ", " + model_trigger[model_name]
265
+ return trigger
266
+
267
+
268
+ # https://huggingface.co/docs/diffusers/v0.23.1/en/api/loaders#diffusers.loaders.LoraLoaderMixin.fuse_lora
269
+ # https://github.com/huggingface/diffusers/issues/4919
270
+ def fuse_loras(pipe, lorajson: list[dict]):
271
+ try:
272
+ if not lorajson or not isinstance(lorajson, list): return pipe, [], []
273
+ a_list = []
274
+ w_list = []
275
+ for d in lorajson:
276
+ if not d or not isinstance(d, dict) or not d["name"] or d["name"] == "None": continue
277
+ k = d["name"]
278
+ if is_repo_name(k) and is_repo_exists(k):
279
+ a_name = Path(k).stem
280
+ pipe.load_lora_weights(k, weight_name=d["filename"], adapter_name = a_name, low_cpu_mem_usage=True)
281
+ elif not Path(k).exists():
282
+ print(f"LoRA not found: {k}")
283
+ continue
284
+ else:
285
+ w_name = Path(k).name
286
+ a_name = Path(k).stem
287
+ pipe.load_lora_weights(k, weight_name = w_name, adapter_name = a_name, low_cpu_mem_usage=True)
288
+ a_list.append(a_name)
289
+ w_list.append(d["scale"])
290
+ if not a_list: return pipe, [], []
291
+ #pipe.set_adapters(a_list, adapter_weights=w_list)
292
+ #pipe.fuse_lora(adapter_names=a_list, lora_scale=1.0)
293
+ #pipe.unload_lora_weights()
294
+ return pipe, a_list, w_list
295
+ except Exception as e:
296
+ print(f"External LoRA Error: {e}")
297
+ raise Exception(f"External LoRA Error: {e}") from e
298
+
299
+
300
+ def description_ui():
301
+ gr.Markdown(
302
+ """
303
+ - Mod of [multimodalart/flux-lora-the-explorer](https://huggingface.co/spaces/multimodalart/flux-lora-the-explorer),
304
+ [multimodalart/flux-lora-lab](https://huggingface.co/spaces/multimodalart/flux-lora-lab),
305
+ [jiuface/FLUX.1-dev-Controlnet-Union](https://huggingface.co/spaces/jiuface/FLUX.1-dev-Controlnet-Union),
306
+ [DamarJati/FLUX.1-DEV-Canny](https://huggingface.co/spaces/DamarJati/FLUX.1-DEV-Canny),
307
+ [gokaygokay/FLUX-Prompt-Generator](https://huggingface.co/spaces/gokaygokay/FLUX-Prompt-Generator).
308
+ """
309
+ )
310
+
311
+
312
+ from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
313
+ def load_prompt_enhancer():
314
+ try:
315
+ model_checkpoint = "gokaygokay/Flux-Prompt-Enhance"
316
+ tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
317
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint).eval().to(device=device)
318
+ enhancer_flux = pipeline('text2text-generation', model=model, tokenizer=tokenizer, repetition_penalty=1.5, device=device)
319
+ except Exception as e:
320
+ print(e)
321
+ enhancer_flux = None
322
+ return enhancer_flux
323
+
324
+
325
+ enhancer_flux = load_prompt_enhancer()
326
+
327
+
328
+ @spaces.GPU(duration=30)
329
+ def enhance_prompt(input_prompt):
330
+ result = enhancer_flux("enhance prompt: " + translate_to_en(input_prompt), max_length = 256)
331
+ enhanced_text = result[0]['generated_text']
332
+ return enhanced_text
333
+
334
+
335
+ def save_image(image, savefile, modelname, prompt, height, width, steps, cfg, seed):
336
+ import uuid
337
+ from PIL import PngImagePlugin
338
+ import json
339
+ try:
340
+ if savefile is None: savefile = f"{modelname.split('/')[-1]}_{str(uuid.uuid4())}.png"
341
+ metadata = {"prompt": prompt, "Model": {"Model": modelname.split("/")[-1]}}
342
+ metadata["num_inference_steps"] = steps
343
+ metadata["guidance_scale"] = cfg
344
+ metadata["seed"] = seed
345
+ metadata["resolution"] = f"{width} x {height}"
346
+ metadata_str = json.dumps(metadata)
347
+ info = PngImagePlugin.PngInfo()
348
+ info.add_text("metadata", metadata_str)
349
+ image.save(savefile, "PNG", pnginfo=info)
350
+ return str(Path(savefile).resolve())
351
+ except Exception as e:
352
+ print(f"Failed to save image file: {e}")
353
+ raise Exception(f"Failed to save image file:") from e
354
+
355
+
356
+ load_prompt_enhancer.zerogpu = True
357
+ fuse_loras.zerogpu = True
358
+ preprocess_image.zerogpu = True
359
+ get_control_params.zerogpu = True
360
+ clear_cache.zerogpu = True
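A hedged sketch of how the LoRA helpers above fit together: `compose_lora_json` fills a per-slot dictionary, `is_valid_lora` gates the work, `fuse_loras` loads any resolvable adapters, and `get_trigger_word` appends trigger tokens to the prompt. The base model id, LoRA repo id, and weight filename below are hypothetical placeholders.

```python
# Illustrative LoRA flow (hypothetical base model, LoRA repo id, and filename).
import torch
from diffusers import FluxPipeline
from env import num_loras
from mod import compose_lora_json, is_valid_lora, get_trigger_word, fuse_loras

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)  # assumed base model

lorajson = [{"name": "", "scale": 1.0, "filename": "", "trigger": ""} for _ in range(num_loras)]
lorajson = compose_lora_json(
    lorajson, 0,
    name="some-user/some-flux-lora",  # hypothetical LoRA repo
    scale=0.8,
    filename="lora.safetensors",      # hypothetical weight file
    trigger="trigger word",
)

if is_valid_lora(lorajson):
    pipe, adapter_names, adapter_weights = fuse_loras(pipe, lorajson)
    prompt = "a quiet street at dusk" + get_trigger_word(lorajson)
```

Note that importing `mod` also runs its top-level setup (the pip install subprocess and prompt-enhancer load) exactly as written above.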
open_flux.py ADDED
@@ -0,0 +1,222 @@
1
+ import numpy as np
2
+ import torch
3
+ from diffusers import FluxPipeline
+ from diffusers.pipelines.flux.pipeline_output import FluxPipelineOutput
4
+ from typing import List, Union, Optional, Dict, Any, Callable
5
+ from diffusers.pipelines.flux.pipeline_flux import calculate_shift, retrieve_timesteps
6
+
7
+ from diffusers.utils import is_torch_xla_available
8
+
9
+ if is_torch_xla_available():
10
+ import torch_xla.core.xla_model as xm
11
+
12
+ XLA_AVAILABLE = True
13
+ else:
14
+ XLA_AVAILABLE = False
15
+
16
+ # TODO this is rough. Need to properly stack unconditional or make it optional
17
+ class FluxWithCFGPipeline(FluxPipeline):
18
+ def __call__(
19
+ self,
20
+ prompt: Union[str, List[str]] = None,
21
+ prompt_2: Optional[Union[str, List[str]]] = None,
22
+ negative_prompt: Optional[Union[str, List[str]]] = None,
23
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
24
+ height: Optional[int] = None,
25
+ width: Optional[int] = None,
26
+ num_inference_steps: int = 28,
27
+ timesteps: List[int] = None,
28
+ guidance_scale: float = 7.0,
29
+ num_images_per_prompt: Optional[int] = 1,
30
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
31
+ latents: Optional[torch.FloatTensor] = None,
32
+ prompt_embeds: Optional[torch.FloatTensor] = None,
33
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
34
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
35
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
36
+ output_type: Optional[str] = "pil",
37
+ return_dict: bool = True,
38
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
39
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
40
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
41
+ max_sequence_length: int = 512,
42
+ ):
43
+
44
+ height = height or self.default_sample_size * self.vae_scale_factor
45
+ width = width or self.default_sample_size * self.vae_scale_factor
46
+
47
+ # 1. Check inputs. Raise error if not correct
48
+ self.check_inputs(
49
+ prompt,
50
+ prompt_2,
51
+ height,
52
+ width,
53
+ prompt_embeds=prompt_embeds,
54
+ pooled_prompt_embeds=pooled_prompt_embeds,
55
+ callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
56
+ max_sequence_length=max_sequence_length,
57
+ )
58
+
59
+ self._guidance_scale = guidance_scale
60
+ self._joint_attention_kwargs = joint_attention_kwargs
61
+ self._interrupt = False
62
+
63
+ # 2. Define call parameters
64
+ if prompt is not None and isinstance(prompt, str):
65
+ batch_size = 1
66
+ elif prompt is not None and isinstance(prompt, list):
67
+ batch_size = len(prompt)
68
+ else:
69
+ batch_size = prompt_embeds.shape[0]
70
+
71
+ device = self._execution_device
72
+
73
+ lora_scale = (
74
+ self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None
75
+ )
76
+ (
77
+ prompt_embeds,
78
+ pooled_prompt_embeds,
79
+ text_ids,
80
+ ) = self.encode_prompt(
81
+ prompt=prompt,
82
+ prompt_2=prompt_2,
83
+ prompt_embeds=prompt_embeds,
84
+ pooled_prompt_embeds=pooled_prompt_embeds,
85
+ device=device,
86
+ num_images_per_prompt=num_images_per_prompt,
87
+ max_sequence_length=max_sequence_length,
88
+ lora_scale=lora_scale,
89
+ )
90
+ (
91
+ negative_prompt_embeds,
92
+ negative_pooled_prompt_embeds,
93
+ negative_text_ids,
94
+ ) = self.encode_prompt(
95
+ prompt=negative_prompt,
96
+ prompt_2=negative_prompt_2,
97
+ prompt_embeds=negative_prompt_embeds,
98
+ pooled_prompt_embeds=negative_pooled_prompt_embeds,
99
+ device=device,
100
+ num_images_per_prompt=num_images_per_prompt,
101
+ max_sequence_length=max_sequence_length,
102
+ lora_scale=lora_scale,
103
+ )
104
+
105
+ # 4. Prepare latent variables
106
+ num_channels_latents = self.transformer.config.in_channels // 4
107
+ latents, latent_image_ids = self.prepare_latents(
108
+ batch_size * num_images_per_prompt,
109
+ num_channels_latents,
110
+ height,
111
+ width,
112
+ prompt_embeds.dtype,
113
+ device,
114
+ generator,
115
+ latents,
116
+ )
117
+
118
+ # 5. Prepare timesteps
119
+ sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
120
+ image_seq_len = latents.shape[1]
121
+ mu = calculate_shift(
122
+ image_seq_len,
123
+ self.scheduler.config.base_image_seq_len,
124
+ self.scheduler.config.max_image_seq_len,
125
+ self.scheduler.config.base_shift,
126
+ self.scheduler.config.max_shift,
127
+ )
128
+ timesteps, num_inference_steps = retrieve_timesteps(
129
+ self.scheduler,
130
+ num_inference_steps,
131
+ device,
132
+ timesteps,
133
+ sigmas,
134
+ mu=mu,
135
+ )
136
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
137
+ self._num_timesteps = len(timesteps)
138
+
139
+ # 6. Denoising loop
140
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
141
+ for i, t in enumerate(timesteps):
142
+ if self.interrupt:
143
+ continue
144
+
145
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
146
+ timestep = t.expand(latents.shape[0]).to(latents.dtype)
147
+
148
+ # handle guidance
149
+ if self.transformer.config.guidance_embeds:
150
+ guidance = torch.tensor([guidance_scale], device=device)
151
+ guidance = guidance.expand(latents.shape[0])
152
+ else:
153
+ guidance = None
154
+
155
+ noise_pred_text = self.transformer(
156
+ hidden_states=latents,
157
+ timestep=timestep / 1000,
158
+ guidance=guidance,
159
+ pooled_projections=pooled_prompt_embeds,
160
+ encoder_hidden_states=prompt_embeds,
161
+ txt_ids=text_ids,
162
+ img_ids=latent_image_ids,
163
+ joint_attention_kwargs=self.joint_attention_kwargs,
164
+ return_dict=False,
165
+ )[0]
166
+
167
+ # todo combine these
168
+ noise_pred_uncond = self.transformer(
169
+ hidden_states=latents,
170
+ timestep=timestep / 1000,
171
+ guidance=guidance,
172
+ pooled_projections=negative_pooled_prompt_embeds,
173
+ encoder_hidden_states=negative_prompt_embeds,
174
+ txt_ids=negative_text_ids,
175
+ img_ids=latent_image_ids,
176
+ joint_attention_kwargs=self.joint_attention_kwargs,
177
+ return_dict=False,
178
+ )[0]
179
+
180
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
181
+
182
+ # compute the previous noisy sample x_t -> x_t-1
183
+ latents_dtype = latents.dtype
184
+ latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
185
+
186
+ if latents.dtype != latents_dtype:
187
+ if torch.backends.mps.is_available():
188
+ # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
189
+ latents = latents.to(latents_dtype)
190
+
191
+ if callback_on_step_end is not None:
192
+ callback_kwargs = {}
193
+ for k in callback_on_step_end_tensor_inputs:
194
+ callback_kwargs[k] = locals()[k]
195
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
196
+
197
+ latents = callback_outputs.pop("latents", latents)
198
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
199
+
200
+ # call the callback, if provided
201
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
202
+ progress_bar.update()
203
+
204
+ if XLA_AVAILABLE:
205
+ xm.mark_step()
206
+
207
+ if output_type == "latent":
208
+ image = latents
209
+
210
+ else:
211
+ latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
212
+ latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
213
+ image = self.vae.decode(latents, return_dict=False)[0]
214
+ image = self.image_processor.postprocess(image, output_type=output_type)
215
+
216
+ # Offload all models
217
+ self.maybe_free_model_hooks()
218
+
219
+ if not return_dict:
220
+ return (image,)
221
+
222
+ return FluxPipelineOutput(images=image)
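For reference, a hedged invocation sketch of this CFG-enabled variant; `ostris/OpenFLUX.1` is taken from the model list in `env (1).py`, while dtype and device are assumptions.

```python
# Hypothetical call into open_flux.FluxWithCFGPipeline (assumed dtype and device).
import torch
from open_flux import FluxWithCFGPipeline

pipe = FluxWithCFGPipeline.from_pretrained("ostris/OpenFLUX.1", torch_dtype=torch.bfloat16).to("cuda")
out = pipe(
    prompt="an art nouveau poster of a hummingbird",
    negative_prompt="text, watermark",
    num_inference_steps=28,
    guidance_scale=7.0,
)
out.images[0].save("hummingbird.png")  # FluxPipelineOutput.images is a list of PIL images
```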
pipeline (2).py ADDED
@@ -0,0 +1,796 @@
1
+ # Copyright 2024 Black Forest Labs, The HuggingFace Team and InstantX Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Union
17
+
18
+ import numpy as np
19
+ import torch
20
+ from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
21
+
22
+ from diffusers.image_processor import VaeImageProcessor
23
+ from diffusers.loaders import FluxLoraLoaderMixin, FromSingleFileMixin
24
+ from diffusers.models.autoencoders import AutoencoderKL
25
+ from diffusers.models.transformers import FluxTransformer2DModel
26
+ from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
27
+ from diffusers.utils import (
28
+ USE_PEFT_BACKEND,
29
+ is_torch_xla_available,
30
+ logging,
31
+ replace_example_docstring,
32
+ scale_lora_layers,
33
+ unscale_lora_layers,
34
+ )
35
+ from diffusers.utils.torch_utils import randn_tensor
36
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
37
+ from diffusers.pipelines.flux.pipeline_output import FluxPipelineOutput
38
+
39
+
40
+ if is_torch_xla_available():
41
+ import torch_xla.core.xla_model as xm
42
+
43
+ XLA_AVAILABLE = True
44
+ else:
45
+ XLA_AVAILABLE = False
46
+
47
+
48
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
49
+
50
+ EXAMPLE_DOC_STRING = """
51
+ Examples:
52
+ ```py
53
+ >>> import torch
54
+ >>> from diffusers import FluxPipeline
55
+
56
+ >>> pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16)
57
+ >>> pipe.to("cuda")
58
+ >>> prompt = "A cat holding a sign that says hello world"
59
+ >>> # Depending on the variant being used, the pipeline call will slightly vary.
60
+ >>> # Refer to the pipeline documentation for more details.
61
+ >>> image = pipe(prompt, num_inference_steps=4, guidance_scale=0.0).images[0]
62
+ >>> image.save("flux.png")
63
+ ```
64
+ """
65
+
66
+
67
+ def calculate_shift(
68
+ image_seq_len,
69
+ base_seq_len: int = 256,
70
+ max_seq_len: int = 4096,
71
+ base_shift: float = 0.5,
72
+ max_shift: float = 1.16,
73
+ ):
74
+ m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
75
+ b = base_shift - m * base_seq_len
76
+ mu = image_seq_len * m + b
77
+ return mu
78
+
79
+
80
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
81
+ def retrieve_timesteps(
82
+ scheduler,
83
+ num_inference_steps: Optional[int] = None,
84
+ device: Optional[Union[str, torch.device]] = None,
85
+ timesteps: Optional[List[int]] = None,
86
+ sigmas: Optional[List[float]] = None,
87
+ **kwargs,
88
+ ):
89
+ """
90
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
91
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
92
+
93
+ Args:
94
+ scheduler (`SchedulerMixin`):
95
+ The scheduler to get timesteps from.
96
+ num_inference_steps (`int`):
97
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
98
+ must be `None`.
99
+ device (`str` or `torch.device`, *optional*):
100
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
101
+ timesteps (`List[int]`, *optional*):
102
+ Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
103
+ `num_inference_steps` and `sigmas` must be `None`.
104
+ sigmas (`List[float]`, *optional*):
105
+ Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
106
+ `num_inference_steps` and `timesteps` must be `None`.
107
+
108
+ Returns:
109
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
110
+ second element is the number of inference steps.
111
+ """
112
+ if timesteps is not None and sigmas is not None:
113
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
114
+ if timesteps is not None:
115
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
116
+ if not accepts_timesteps:
117
+ raise ValueError(
118
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
119
+ f" timestep schedules. Please check whether you are using the correct scheduler."
120
+ )
121
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
122
+ timesteps = scheduler.timesteps
123
+ num_inference_steps = len(timesteps)
124
+ elif sigmas is not None:
125
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
126
+ if not accept_sigmas:
127
+ raise ValueError(
128
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
129
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
130
+ )
131
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
132
+ timesteps = scheduler.timesteps
133
+ num_inference_steps = len(timesteps)
134
+ else:
135
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
136
+ timesteps = scheduler.timesteps
137
+ return timesteps, num_inference_steps
138
+
139
+
140
+ class FluxWithCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixin):
141
+ r"""
142
+ The Flux pipeline for text-to-image generation.
143
+
144
+ Reference: https://blackforestlabs.ai/announcing-black-forest-labs/
145
+
146
+ Args:
147
+ transformer ([`FluxTransformer2DModel`]):
148
+ Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
149
+ scheduler ([`FlowMatchEulerDiscreteScheduler`]):
150
+ A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
151
+ vae ([`AutoencoderKL`]):
152
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
153
+ text_encoder ([`CLIPTextModel`]):
154
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
155
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
156
+ text_encoder_2 ([`T5EncoderModel`]):
157
+ [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically
158
+ the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant.
159
+ tokenizer (`CLIPTokenizer`):
160
+ Tokenizer of class
161
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer).
162
+ tokenizer_2 (`T5TokenizerFast`):
163
+ Second Tokenizer of class
164
+ [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast).
165
+ """
166
+
167
+ model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
168
+ _optional_components = []
169
+ _callback_tensor_inputs = ["latents", "prompt_embeds"]
170
+
171
+ def __init__(
172
+ self,
173
+ scheduler: FlowMatchEulerDiscreteScheduler,
174
+ vae: AutoencoderKL,
175
+ text_encoder: CLIPTextModel,
176
+ tokenizer: CLIPTokenizer,
177
+ text_encoder_2: T5EncoderModel,
178
+ tokenizer_2: T5TokenizerFast,
179
+ transformer: FluxTransformer2DModel,
180
+ ):
181
+ super().__init__()
182
+
183
+ self.register_modules(
184
+ vae=vae,
185
+ text_encoder=text_encoder,
186
+ text_encoder_2=text_encoder_2,
187
+ tokenizer=tokenizer,
188
+ tokenizer_2=tokenizer_2,
189
+ transformer=transformer,
190
+ scheduler=scheduler,
191
+ )
192
+ self.vae_scale_factor = (
193
+ 2 ** (len(self.vae.config.block_out_channels)) if hasattr(self, "vae") and self.vae is not None else 16
194
+ )
195
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
196
+ self.tokenizer_max_length = (
197
+ self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
198
+ )
199
+ self.default_sample_size = 64
200
+
201
+ def _get_t5_prompt_embeds(
202
+ self,
203
+ prompt: Union[str, List[str]] = None,
204
+ num_images_per_prompt: int = 1,
205
+ max_sequence_length: int = 512,
206
+ device: Optional[torch.device] = None,
207
+ dtype: Optional[torch.dtype] = None,
208
+ ):
209
+ device = device or self._execution_device
210
+ dtype = dtype or self.text_encoder.dtype
211
+
212
+ prompt = [prompt] if isinstance(prompt, str) else prompt
213
+ batch_size = len(prompt)
214
+
215
+ text_inputs = self.tokenizer_2(
216
+ prompt,
217
+ padding="max_length",
218
+ max_length=max_sequence_length,
219
+ truncation=True,
220
+ return_length=False,
221
+ return_overflowing_tokens=False,
222
+ return_tensors="pt",
223
+ )
224
+ text_input_ids = text_inputs.input_ids
225
+ untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids
226
+
227
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
228
+ removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
229
+ logger.warning(
230
+ "The following part of your input was truncated because `max_sequence_length` is set to "
231
+ f" {max_sequence_length} tokens: {removed_text}"
232
+ )
233
+
234
+ prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0]
235
+
236
+ dtype = self.text_encoder_2.dtype
237
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
238
+
239
+ _, seq_len, _ = prompt_embeds.shape
240
+
241
+ # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
242
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
243
+ prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
244
+
245
+ return prompt_embeds
246
+
247
+ def _get_clip_prompt_embeds(
248
+ self,
249
+ prompt: Union[str, List[str]],
250
+ num_images_per_prompt: int = 1,
251
+ device: Optional[torch.device] = None,
252
+ ):
253
+ device = device or self._execution_device
254
+
255
+ prompt = [prompt] if isinstance(prompt, str) else prompt
256
+ batch_size = len(prompt)
257
+
258
+ text_inputs = self.tokenizer(
259
+ prompt,
260
+ padding="max_length",
261
+ max_length=self.tokenizer_max_length,
262
+ truncation=True,
263
+ return_overflowing_tokens=False,
264
+ return_length=False,
265
+ return_tensors="pt",
266
+ )
267
+
268
+ text_input_ids = text_inputs.input_ids
269
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
270
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
271
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
272
+ logger.warning(
273
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
274
+ f" {self.tokenizer_max_length} tokens: {removed_text}"
275
+ )
276
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False)
277
+
278
+ # Use pooled output of CLIPTextModel
279
+ prompt_embeds = prompt_embeds.pooler_output
280
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
281
+
282
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
283
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt)
284
+ prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1)
285
+
286
+ return prompt_embeds
287
+
288
+ def encode_prompt(
289
+ self,
290
+ prompt: Union[str, List[str]],
291
+ prompt_2: Union[str, List[str]],
292
+ negative_prompt: Union[str, List[str]],
293
+ device: Optional[torch.device] = None,
294
+ num_images_per_prompt: int = 1,
295
+ prompt_embeds: Optional[torch.FloatTensor] = None,
296
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
297
+ max_sequence_length: int = 512,
298
+ lora_scale: Optional[float] = None,
299
+ ):
300
+ r"""
301
+
302
+ Args:
303
+ prompt (`str` or `List[str]`, *optional*):
304
+ prompt to be encoded
305
+ prompt_2 (`str` or `List[str]`, *optional*):
306
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
307
+ used in all text-encoders
308
+ device: (`torch.device`):
309
+ torch device
310
+ num_images_per_prompt (`int`):
311
+ number of images that should be generated per prompt
312
+ prompt_embeds (`torch.FloatTensor`, *optional*):
313
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
314
+ provided, text embeddings will be generated from `prompt` input argument.
315
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
316
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
317
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
318
+ lora_scale (`float`, *optional*):
319
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
320
+ """
321
+ device = device or self._execution_device
322
+
+ # set lora scale so that monkey patched LoRA
+ # function of text encoder can correctly access it
+ if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin):
+ self._lora_scale = lora_scale
+
+ # dynamically adjust the LoRA scale
+ if self.text_encoder is not None and USE_PEFT_BACKEND:
+ scale_lora_layers(self.text_encoder, lora_scale)
+ if self.text_encoder_2 is not None and USE_PEFT_BACKEND:
+ scale_lora_layers(self.text_encoder_2, lora_scale)
+
+ prompt = [prompt] if isinstance(prompt, str) else prompt
+ negative_prompt = [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
+
+ if prompt_embeds is None:
+ prompt_2 = prompt_2 or prompt
+ prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
+
+ # We only use the pooled prompt output from the CLIPTextModel
+ pooled_prompt_embeds = self._get_clip_prompt_embeds(
+ prompt=prompt,
+ device=device,
+ num_images_per_prompt=num_images_per_prompt,
+ )
+ prompt_embeds = self._get_t5_prompt_embeds(
+ prompt=prompt_2,
+ num_images_per_prompt=num_images_per_prompt,
+ max_sequence_length=max_sequence_length,
+ device=device,
+ )
+
+ # We only use the pooled prompt output from the CLIPTextModel
+ negative_pooled_prompt_embeds = self._get_clip_prompt_embeds(
+ prompt=negative_prompt,
+ device=device,
+ num_images_per_prompt=num_images_per_prompt,
+ )
+ negative_prompt_embeds = self._get_t5_prompt_embeds(
+ prompt=negative_prompt,
+ num_images_per_prompt=num_images_per_prompt,
+ max_sequence_length=max_sequence_length,
+ device=device,
+ )
+
+ if self.text_encoder is not None:
+ if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
+ # Retrieve the original scale by scaling back the LoRA layers
+ unscale_lora_layers(self.text_encoder, lora_scale)
+
+ if self.text_encoder_2 is not None:
+ if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
+ # Retrieve the original scale by scaling back the LoRA layers
+ unscale_lora_layers(self.text_encoder_2, lora_scale)
+
+ dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype
+ text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype)
+
+ return prompt_embeds, pooled_prompt_embeds, text_ids, negative_prompt_embeds, negative_pooled_prompt_embeds
+
+ def check_inputs(
+ self,
+ prompt,
+ prompt_2,
+ height,
+ width,
+ prompt_embeds=None,
+ pooled_prompt_embeds=None,
+ callback_on_step_end_tensor_inputs=None,
+ max_sequence_length=None,
+ ):
+ if height % 8 != 0 or width % 8 != 0:
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
+
+ if callback_on_step_end_tensor_inputs is not None and not all(
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
+ ):
+ raise ValueError(
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
+ )
+
+ if prompt is not None and prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+ " only forward one of the two."
+ )
+ elif prompt_2 is not None and prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+ " only forward one of the two."
+ )
+ elif prompt is None and prompt_embeds is None:
+ raise ValueError(
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
+ )
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
+
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
+ raise ValueError(
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
+ )
+
+ if max_sequence_length is not None and max_sequence_length > 512:
+ raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}")
+
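+ # The 3-channel position ids index each packed 2x2 latent patch: channel 1 holds the row
+ # index and channel 2 the column index; channel 0 is left at zero for image tokens.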
+ @staticmethod
+ def _prepare_latent_image_ids(batch_size, height, width, device, dtype):
+ latent_image_ids = torch.zeros(height // 2, width // 2, 3)
+ latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None]
+ latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :]
+
+ latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape
+
+ latent_image_ids = latent_image_ids.reshape(
+ latent_image_id_height * latent_image_id_width, latent_image_id_channels
+ )
+
+ return latent_image_ids.to(device=device, dtype=dtype)
+
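+ # _pack_latents turns (B, C, H, W) latents into a token sequence of shape
+ # (B, (H/2) * (W/2), 4 * C) by folding each 2x2 spatial patch into the channel dimension.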
+ @staticmethod
+ def _pack_latents(latents, batch_size, num_channels_latents, height, width):
+ latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
+ latents = latents.permute(0, 2, 4, 1, 3, 5)
+ latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4)
+
+ return latents
+
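+ # _unpack_latents is the inverse of _pack_latents: it restores the (B, C, H, W) layout
+ # expected by the VAE decoder from the packed token sequence.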
+ @staticmethod
+ def _unpack_latents(latents, height, width, vae_scale_factor):
+ batch_size, num_patches, channels = latents.shape
+
+ height = height // vae_scale_factor
+ width = width // vae_scale_factor
+
+ latents = latents.view(batch_size, height, width, channels // 4, 2, 2)
+ latents = latents.permute(0, 3, 1, 4, 2, 5)
+
+ latents = latents.reshape(batch_size, channels // (2 * 2), height * 2, width * 2)
+
+ return latents
+
+ def enable_vae_slicing(self):
+ r"""
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
+ """
+ self.vae.enable_slicing()
+
+ def disable_vae_slicing(self):
+ r"""
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
+ computing decoding in one step.
+ """
+ self.vae.disable_slicing()
+
+ def enable_vae_tiling(self):
+ r"""
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
+ processing larger images.
+ """
+ self.vae.enable_tiling()
+
+ def disable_vae_tiling(self):
+ r"""
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
+ computing decoding in one step.
+ """
+ self.vae.disable_tiling()
+
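+ # prepare_latents works in the packed space: height/width are first mapped to the latent
+ # grid (divided by vae_scale_factor, scaled by 2 for the 2x2 patches), noise is drawn with
+ # randn_tensor, then packed into tokens alongside matching position ids.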
+ def prepare_latents(
+ self,
+ batch_size,
+ num_channels_latents,
+ height,
+ width,
+ dtype,
+ device,
+ generator,
+ latents=None,
+ ):
+ height = 2 * (int(height) // self.vae_scale_factor)
+ width = 2 * (int(width) // self.vae_scale_factor)
+
+ shape = (batch_size, num_channels_latents, height, width)
+
+ if latents is not None:
+ latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype)
+ return latents.to(device=device, dtype=dtype), latent_image_ids
+
+ if isinstance(generator, list) and len(generator) != batch_size:
+ raise ValueError(
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+ )
+
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+ latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width)
+
+ latent_image_ids = self._prepare_latent_image_ids(batch_size, height, width, device, dtype)
+
+ return latents, latent_image_ids
+
+ @property
+ def guidance_scale(self):
+ return self._guidance_scale
+
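+ # Classifier-free guidance is active whenever guidance_scale > 1; the prompt and
+ # negative-prompt embeddings are then batched together and denoised in a single pass.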
+ @property
+ def do_classifier_free_guidance(self):
+ return self._guidance_scale > 1
+
+ @property
+ def joint_attention_kwargs(self):
+ return self._joint_attention_kwargs
+
+ @property
+ def num_timesteps(self):
+ return self._num_timesteps
+
+ @property
+ def interrupt(self):
+ return self._interrupt
+
+ @torch.no_grad()
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
+ def __call__(
+ self,
+ prompt: Union[str, List[str]] = None,
+ prompt_2: Optional[Union[str, List[str]]] = None,
+ negative_prompt: Union[str, List[str]] = None,
+ height: Optional[int] = None,
+ width: Optional[int] = None,
+ num_inference_steps: int = 28,
+ timesteps: List[int] = None,
+ guidance_scale: float = 3.5,
+ num_images_per_prompt: Optional[int] = 1,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ prompt_embeds: Optional[torch.FloatTensor] = None,
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+ output_type: Optional[str] = "pil",
+ return_dict: bool = True,
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+ max_sequence_length: int = 512,
+ ):
+ r"""
573
+ Function invoked when calling the pipeline for generation.
574
+
575
+ Args:
576
+ prompt (`str` or `List[str]`, *optional*):
577
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
578
+ instead.
579
+ prompt_2 (`str` or `List[str]`, *optional*):
580
+ The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
581
+ will be used instead
582
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
583
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
584
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
585
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
586
+ num_inference_steps (`int`, *optional*, defaults to 50):
587
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
588
+ expense of slower inference.
589
+ timesteps (`List[int]`, *optional*):
590
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
591
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
592
+ passed will be used. Must be in descending order.
593
+ guidance_scale (`float`, *optional*, defaults to 7.0):
594
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
595
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
596
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
597
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
598
+ usually at the expense of lower image quality.
599
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
600
+ The number of images to generate per prompt.
601
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
602
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
603
+ to make generation deterministic.
604
+ latents (`torch.FloatTensor`, *optional*):
605
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
606
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
607
+ tensor will ge generated by sampling using the supplied random `generator`.
608
+ prompt_embeds (`torch.FloatTensor`, *optional*):
609
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
610
+ provided, text embeddings will be generated from `prompt` input argument.
611
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
612
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
613
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
614
+ output_type (`str`, *optional*, defaults to `"pil"`):
615
+ The output format of the generate image. Choose between
616
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
617
+ return_dict (`bool`, *optional*, defaults to `True`):
618
+ Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple.
619
+ joint_attention_kwargs (`dict`, *optional*):
620
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
621
+ `self.processor` in
622
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
623
+ callback_on_step_end (`Callable`, *optional*):
624
+ A function that calls at the end of each denoising steps during the inference. The function is called
625
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
626
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
627
+ `callback_on_step_end_tensor_inputs`.
628
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
629
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
630
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
631
+ `._callback_tensor_inputs` attribute of your pipeline class.
632
+ max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`.
633
+
634
+ Examples:
635
+
636
+ Returns:
637
+ [`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if `return_dict`
638
+ is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated
639
+ images.
640
+ """
641
+
+ height = height or self.default_sample_size * self.vae_scale_factor
+ width = width or self.default_sample_size * self.vae_scale_factor
+
+ # 1. Check inputs. Raise error if not correct
+ self.check_inputs(
+ prompt,
+ prompt_2,
+ height,
+ width,
+ prompt_embeds=prompt_embeds,
+ pooled_prompt_embeds=pooled_prompt_embeds,
+ callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
+ max_sequence_length=max_sequence_length,
+ )
+
+ self._guidance_scale = guidance_scale
+ self._joint_attention_kwargs = joint_attention_kwargs
+ self._interrupt = False
+
+ # 2. Define call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ device = self._execution_device
+
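+ # 3. Encode the prompt (and negative prompt) with CLIP and T5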
+ lora_scale = (
+ self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None
+ )
+ (
+ prompt_embeds,
+ pooled_prompt_embeds,
+ text_ids,
+ negative_prompt_embeds,
+ negative_pooled_prompt_embeds
+ ) = self.encode_prompt(
+ prompt=prompt,
+ prompt_2=prompt_2,
+ negative_prompt=negative_prompt,
+ prompt_embeds=prompt_embeds,
+ pooled_prompt_embeds=pooled_prompt_embeds,
+ device=device,
+ num_images_per_prompt=num_images_per_prompt,
+ max_sequence_length=max_sequence_length,
+ lora_scale=lora_scale,
+ )
+
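+ # For CFG the negative embeddings are placed first in the batch, so chunk(2) below yields
+ # (unconditional, conditional) in that order.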
+ if self.do_classifier_free_guidance:
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
+ pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0)
+
+ # 4. Prepare latent variables
+ num_channels_latents = self.transformer.config.in_channels // 4
+ latents, latent_image_ids = self.prepare_latents(
+ batch_size * num_images_per_prompt,
+ num_channels_latents,
+ height,
+ width,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ latents,
+ )
+
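+ # The flow-matching scheduler uses a resolution-dependent timestep shift: `mu` is computed
+ # from the packed latent sequence length and shifts the sigma schedule for larger images.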
+ # 5. Prepare timesteps
+ sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
+ image_seq_len = latents.shape[1]
+ mu = calculate_shift(
+ image_seq_len,
+ self.scheduler.config.base_image_seq_len,
+ self.scheduler.config.max_image_seq_len,
+ self.scheduler.config.base_shift,
+ self.scheduler.config.max_shift,
+ )
+ timesteps, num_inference_steps = retrieve_timesteps(
+ self.scheduler,
+ num_inference_steps,
+ device,
+ timesteps,
+ sigmas,
+ mu=mu,
+ )
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
+ self._num_timesteps = len(timesteps)
+
+ # 6. Denoising loop
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ for i, t in enumerate(timesteps):
+ if self.interrupt:
+ continue
+
+ # expand the latents if we are doing classifier free guidance
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+ timestep = t.expand(latent_model_input.shape[0])
+
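+ # The transformer expects timesteps normalized to [0, 1], hence the division by 1000.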
+ noise_pred = self.transformer(
+ hidden_states=latent_model_input,
+ timestep=timestep / 1000,
+ pooled_projections=pooled_prompt_embeds,
+ encoder_hidden_states=prompt_embeds,
+ txt_ids=text_ids,
+ img_ids=latent_image_ids,
+ joint_attention_kwargs=self.joint_attention_kwargs,
+ return_dict=False,
+ )[0]
+
+ if self.do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ latents_dtype = latents.dtype
+ latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
+
+ if latents.dtype != latents_dtype:
+ if torch.backends.mps.is_available():
+ # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
+ latents = latents.to(latents_dtype)
+
+ if callback_on_step_end is not None:
+ callback_kwargs = {}
+ for k in callback_on_step_end_tensor_inputs:
+ callback_kwargs[k] = locals()[k]
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+ latents = callback_outputs.pop("latents", latents)
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+
+ # call the callback, if provided
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+ progress_bar.update()
+
+ if XLA_AVAILABLE:
+ xm.mark_step()
+
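+ # Unless latents are requested directly, unpack the tokens back to (B, C, H, W), undo the
+ # VAE scaling_factor/shift_factor normalization, and decode to images.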
+ if output_type == "latent":
+ image = latents
+
+ else:
+ latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
+ latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
+ image = self.vae.decode(latents, return_dict=False)[0]
+ image = self.image_processor.postprocess(image, output_type=output_type)
+
+ # Offload all models
+ self.maybe_free_model_hooks()
+
+ if not return_dict:
+ return (image,)
+
+ return FluxPipelineOutput(images=image)
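+
+ # Usage sketch (illustrative only; the pipeline class name "FluxCFGPipeline", the checkpoint id,
+ # dtype, and sampler settings below are assumptions, not part of this file):
+ #
+ # import torch
+ # pipe = FluxCFGPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
+ # pipe.to("cuda")
+ # image = pipe(
+ # prompt="a watercolor fox in a birch forest",
+ # negative_prompt="blurry, oversaturated",
+ # guidance_scale=3.5,
+ # num_inference_steps=28,
+ # ).images[0]
+ # image.save("fox.png")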