barreloflube committed on
Commit f5e5830 · 1 Parent(s): 6fc6fef

Refactor UI structure and import spaces module

Files changed (2)
  1. app.py +119 -321
  2. app3.py +1018 -0
app.py CHANGED
@@ -11,20 +11,9 @@ import spaces
 import gradio as gr
 from huggingface_hub import ModelCard
 import torch
-import numpy as np
 from pydantic import BaseModel
 from PIL import Image
 from diffusers import (
-    FluxPipeline,
-    FluxImg2ImgPipeline,
-    FluxInpaintPipeline,
-    FluxControlNetPipeline,
-    StableDiffusionXLPipeline,
-    StableDiffusionXLImg2ImgPipeline,
-    StableDiffusionXLInpaintPipeline,
-    StableDiffusionXLControlNetPipeline,
-    StableDiffusionXLControlNetImg2ImgPipeline,
-    StableDiffusionXLControlNetInpaintPipeline,
     AutoPipelineForText2Image,
     AutoPipelineForImage2Image,
     AutoPipelineForInpainting,
@@ -32,22 +21,12 @@ from diffusers import (
     AutoencoderKL,
     FluxControlNetModel,
     FluxMultiControlNetModel,
-    ControlNetModel,
 )
-from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
 from huggingface_hub import hf_hub_download
-from transformers import CLIPFeatureExtractor
-from photomaker import FaceAnalysis2
 from diffusers.schedulers import *
 from huggingface_hub import hf_hub_download
-from safetensors.torch import load_file
 from controlnet_aux.processor import Processor
-from photomaker import (
-    PhotoMakerStableDiffusionXLPipeline,
-    PhotoMakerStableDiffusionXLControlNetPipeline,
-    analyze_faces
-)
-from sd_embed.embedding_funcs import get_weighted_text_embeddings_sdxl, get_weighted_text_embeddings_flux1
+from sd_embed.embedding_funcs import get_weighted_text_embeddings_flux1
 
 
 # Initialize System
@@ -64,11 +43,6 @@ def load_sd():
             "repo_id": "black-forest-labs/FLUX.1-dev",
             "loader": "flux",
             "compute_type": torch.bfloat16,
-        },
-        {
-            "repo_id": "SG161222/RealVisXL_V4.0",
-            "loader": "xl",
-            "compute_type": torch.float16,
         }
     ]
 
@@ -76,96 +50,39 @@ def load_sd():
         try:
             model["pipeline"] = AutoPipelineForText2Image.from_pretrained(
                 model['repo_id'],
+                vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16).to(device),
                 torch_dtype = model['compute_type'],
                 safety_checker = None,
                 variant = "fp16"
             ).to(device)
-            model["pipeline"].enable_model_cpu_offload()
         except:
             model["pipeline"] = AutoPipelineForText2Image.from_pretrained(
                 model['repo_id'],
+                vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16).to(device),
                 torch_dtype = model['compute_type'],
                 safety_checker = None
            ).to(device)
-            model["pipeline"].enable_model_cpu_offload()
+
+        model["pipeline"].enable_model_cpu_offload()
 
 
     # VAE n Refiner
+    flux_vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16).to(device)
     sdxl_vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16).to(device)
     refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", vae=sdxl_vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16").to(device)
     refiner.enable_model_cpu_offload()
 
 
-    # Safety Checker
-    safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker").to(device)
-    feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32", from_pt=True)
-
-
-    # Controlnets
-    controlnet_models = [
-        {
-            "repo_id": "xinsir/controlnet-depth-sdxl-1.0",
-            "name": "depth_xl",
-            "layers": ["depth"],
-            "loader": "xl",
-            "compute_type": torch.float16,
-        },
-        {
-            "repo_id": "xinsir/controlnet-canny-sdxl-1.0",
-            "name": "canny_xl",
-            "layers": ["canny"],
-            "loader": "xl",
-            "compute_type": torch.float16,
-        },
-        {
-            "repo_id": "xinsir/controlnet-openpose-sdxl-1.0",
-            "name": "openpose_xl",
-            "layers": ["pose"],
-            "loader": "xl",
-            "compute_type": torch.float16,
-        },
-        {
-            "repo_id": "xinsir/controlnet-scribble-sdxl-1.0",
-            "name": "scribble_xl",
-            "layers": ["scribble"],
-            "loader": "xl",
-            "compute_type": torch.float16,
-        },
-        {
-            "repo_id": "Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro",
-            "name": "flux1_union_pro",
-            "layers": ["canny_fl", "tile_fl", "depth_fl", "blur_fl", "pose_fl", "gray_fl", "low_quality_fl"],
-            "loader": "flux-multi",
-            "compute_type": torch.bfloat16,
-        }
-    ]
-
-    for controlnet in controlnet_models:
-        if controlnet["loader"] == "xl":
-            controlnet["controlnet"] = ControlNetModel.from_pretrained(
-                controlnet["repo_id"],
-                torch_dtype = controlnet['compute_type']
-            ).to(device)
-        elif controlnet["loader"] == "flux-multi":
-            controlnet["controlnet"] = FluxMultiControlNetModel([FluxControlNetModel.from_pretrained(
-                controlnet["repo_id"],
-                torch_dtype = controlnet['compute_type']
-            ).to(device)])
-    #TODO: Add support for flux only controlnet
-
-
-    # Face Detection (for PhotoMaker)
-    face_detector = FaceAnalysis2(providers=['CUDAExecutionProvider'], allowed_modules=['detection', 'recognition'])
-    face_detector.prepare(ctx_id=0, det_size=(640, 640))
-
-
-    # PhotoMaker V2 (for SDXL only)
-    photomaker_ckpt = hf_hub_download(repo_id="TencentARC/PhotoMaker-V2", filename="photomaker-v2.bin", repo_type="model")
-
-    return device, models, sdxl_vae, refiner, safety_checker, feature_extractor, controlnet_models, face_detector, photomaker_ckpt
+    # ControlNet
+    controlnet = FluxMultiControlNetModel([FluxControlNetModel.from_pretrained(
+        "Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro",
+        torch_dtype=torch.bfloat16
+    ).to(device)])
+
+    return device, models, flux_vae, sdxl_vae, refiner, controlnet
 
 
-device, models, sdxl_vae, refiner, safety_checker, feature_extractor, controlnet_models, face_detector, photomaker_ckpt = load_sd()
+device, models, flux_vae, sdxl_vae, refiner, controlnet = load_sd()
 
 
 # Models
@@ -178,13 +95,11 @@ class ControlNetReq(BaseModel):
         arbitrary_types_allowed=True
 
 
-class SDReq(BaseModel):
+class FluxReq(BaseModel):
     model: str = ""
     prompt: str = ""
-    negative_prompt: Optional[str] = "black-forest-labs/FLUX.1-dev"
    fast_generation: Optional[bool] = True
    loras: Optional[list] = []
-    embeddings: Optional[list] = []
    resize_mode: Optional[str] = "resize_and_fill" # resize_only, crop_and_resize, resize_and_fill
    scheduler: Optional[str] = "euler_fl"
    height: int = 1024
@@ -196,13 +111,12 @@ class SDReq(BaseModel):
     refiner: bool = False
     vae: bool = True
     controlnet_config: Optional[ControlNetReq] = None
-    photomaker_images: Optional[List[Image.Image]] = None
 
     class Config:
         arbitrary_types_allowed=True
 
 
-class SDImg2ImgReq(SDReq):
+class FluxImg2ImgReq(FluxReq):
     image: Image.Image
     strength: float = 1.0
 
@@ -210,115 +124,76 @@ class SDImg2ImgReq(SDReq):
         arbitrary_types_allowed=True
 
 
-class SDInpaintReq(SDImg2ImgReq):
+class FluxInpaintReq(FluxImg2ImgReq):
     mask_image: Image.Image
 
     class Config:
         arbitrary_types_allowed=True
 
 
-# Helper functions
-def get_controlnet(controlnet_config: ControlNetReq):
+# Helper Functions
+def get_control_mode(controlnet_config: ControlNetReq):
     control_mode = []
-    controlnet = []
+    layers = ["canny", "tile", "depth", "blur", "pose", "gray", "low_quality"]
 
-    for m in controlnet_models:
-        for c in controlnet_config.controlnets:
-            if c in m["layers"]:
-                control_mode.append(m["layers"].index(c))
-                controlnet.append(m["controlnet"])
+    for c in controlnet_config.controlnets:
+        if c in layers:
+            control_mode.append(layers.index(c))
 
-    return controlnet, control_mode
+    return control_mode
 
 
-def get_pipe(request: SDReq | SDImg2ImgReq | SDInpaintReq):
+def get_pipe(request: FluxReq | FluxImg2ImgReq | FluxInpaintReq):
     for m in models:
-        if m["repo_id"] == request.model:
-            pipeline = m['pipeline']
-            controlnet, control_mode = get_controlnet(request.controlnet_config) if request.controlnet_config else (None, None)
-
+        if m['repo_id'] == request.model:
             pipe_args = {
-                "pipeline": pipeline,
-                "control_mode": control_mode,
+                "pipeline": m['pipeline'],
             }
+
+
+            # Set ControlNet config
             if request.controlnet_config:
-                pipe_args["controlnet"] = controlnet
-
-            if not request.photomaker_images:
-                if isinstance(request, SDReq):
-                    pipe_args['pipeline'] = AutoPipelineForText2Image.from_pipe(**pipe_args)
-                elif isinstance(request, SDImg2ImgReq):
-                    pipe_args['pipeline'] = AutoPipelineForImage2Image.from_pipe(**pipe_args)
-                elif isinstance(request, SDInpaintReq):
-                    pipe_args['pipeline'] = AutoPipelineForInpainting.from_pipe(**pipe_args)
-                else:
-                    raise ValueError(f"Unknown request type: {type(request)}")
-            elif isinstance(request, any([PhotoMakerStableDiffusionXLPipeline, PhotoMakerStableDiffusionXLControlNetPipeline])):
-                if request.controlnet_config:
-                    pipe_args['pipeline'] = PhotoMakerStableDiffusionXLControlNetPipeline.from_pipe(**pipe_args)
-                else:
-                    pipe_args['pipeline'] = PhotoMakerStableDiffusionXLPipeline.from_pipe(**pipe_args)
-            else:
-                raise ValueError(f"Invalid request type: {type(request)}")
-
-    return pipe_args
-
-
-def load_scheduler(pipeline, scheduler):
-    schedulers = {
-        "dpmpp_2m": (DPMSolverMultistepScheduler, {}),
-        "dpmpp_2m_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True}),
-        "dpmpp_2m_sde": (DPMSolverMultistepScheduler, {"algorithm_type": "sde-dpmsolver++"}),
-        "dpmpp_2m_sde_k": (DPMSolverMultistepScheduler, {"algorithm_type": "sde-dpmsolver++", "use_karras_sigmas": True}),
-        "dpmpp_sde": (DPMSolverSinglestepScheduler, {}),
-        "dpmpp_sde_k": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": True}),
-        "dpm2": (KDPM2DiscreteScheduler, {}),
-        "dpm2_k": (KDPM2DiscreteScheduler, {"use_karras_sigmas": True}),
-        "dpm2_a": (KDPM2AncestralDiscreteScheduler, {}),
-        "dpm2_a_k": (KDPM2AncestralDiscreteScheduler, {"use_karras_sigmas": True}),
-        "euler": (EulerDiscreteScheduler, {}),
-        "euler_a": (EulerAncestralDiscreteScheduler, {}),
-        "heun": (HeunDiscreteScheduler, {}),
-        "lms": (LMSDiscreteScheduler, {}),
-        "lms_k": (LMSDiscreteScheduler, {"use_karras_sigmas": True}),
-        "deis": (DEISMultistepScheduler, {}),
-        "unipc": (UniPCMultistepScheduler, {}),
-        "fm_euler": (FlowMatchEulerDiscreteScheduler, {}),
-    }
-    scheduler_class, kwargs = schedulers.get(scheduler, (None, {}))
-
-    if scheduler_class is not None:
-        scheduler = scheduler_class.from_config(pipeline.scheduler.config, **kwargs)
-    else:
-        raise ValueError(f"Unknown scheduler: {scheduler}")
-
-    return scheduler
-
-
-def load_loras(pipeline, loras, fast_generation):
-    for i, lora in enumerate(loras):
-        pipeline.load_lora_weights(lora['repo_id'], adapter_name=f"lora_{i}")
-    adapter_names = [f"lora_{i}" for i in range(len(loras))]
-    adapter_weights = [lora['weight'] for lora in loras]
-
-    if fast_generation:
-        hyper_lora = hf_hub_download(
-            "ByteDance/Hyper-SD",
-            "Hyper-FLUX.1-dev-8steps-lora.safetensors" if isinstance(pipeline, FluxPipeline) else "Hyper-SDXL-2steps-lora.safetensors"
-        )
-        hyper_weight = 0.125 if isinstance(pipeline, FluxPipeline) else 1.0
-        pipeline.load_lora_weights(hyper_lora, adapter_name="hyper_lora")
-        adapter_names.append("hyper_lora")
-        adapter_weights.append(hyper_weight)
-
-    pipeline.set_adapters(adapter_names, adapter_weights)
-
-
-def load_xl_embeddings(pipeline, embeddings):
-    for embedding in embeddings:
-        state_dict = load_file(hf_hub_download(embedding['repo_id']))
-        pipeline.load_textual_inversion(state_dict['clip_g'], token=embedding['token'], text_encoder=pipeline.text_encoder_2, tokenizer=pipeline.tokenizer_2)
-        pipeline.load_textual_inversion(state_dict["clip_l"], token=embedding['token'], text_encoder=pipeline.text_encoder, tokenizer=pipeline.tokenizer)
+                pipe_args["control_mode"] = get_control_mode(request.controlnet_config)
+                pipe_args["controlnet"] = [controlnet]
+
+
+            # Choose Pipeline Mode
+            if isinstance(request, FluxReq):
+                pipe_args['pipeline'] = AutoPipelineForText2Image.from_pipe(**pipe_args)
+            elif isinstance(request, FluxImg2ImgReq):
+                pipe_args['pipeline'] = AutoPipelineForImage2Image.from_pipe(**pipe_args)
+            elif isinstance(request, FluxInpaintReq):
+                pipe_args['pipeline'] = AutoPipelineForInpainting.from_pipe(**pipe_args)
+
+
+            # Enable or Disable Refiner
+            if request.vae:
+                pipe_args["pipeline"].vae = flux_vae
+            elif not request.vae:
+                pipe_args["pipeline"].vae = None
+
+
+            # Set Scheduler
+            pipe_args["pipeline"].scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe_args["pipeline"].scheduler.config)
+
+
+            # Set Loras
+            if request.loras:
+                for i, lora in enumerate(request.loras):
+                    pipe_args["pipeline"].load_lora_weights(request.lora['repo_id'], adapter_name=f"lora_{i}")
+                adapter_names = [f"lora_{i}" for i in range(len(request.loras))]
+                adapter_weights = [lora['weight'] for lora in request.loras]
+
+                if request.fast_generation:
+                    hyper_lora = hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors")
+                    hyper_weight = 0.125
+                    pipe_args["pipeline"].load_lora_weights(hyper_lora, adapter_name="hyper_lora")
+                    adapter_names.append("hyper_lora")
+                    adapter_weights.append(hyper_weight)
+
+                pipe_args["pipeline"].set_adapters(adapter_names, adapter_weights)
+
+            return pipe_args
 
 
 def resize_images(images: List[Image.Image], height: int, width: int, resize_mode: str):
@@ -333,18 +208,16 @@ def resize_images(images: List[Image.Image], height: int, width: int, resize_mod
     return images
 
 
-def get_controlnet_images(controlnets: List[str], control_images: List[Image.Image], height: int, width: int, resize_mode: str):
+def get_controlnet_images(controlnet_config: ControlNetReq, height: int, width: int, resize_mode: str):
     response_images = []
-    control_images = resize_images(control_images, height, width, resize_mode)
-    for controlnet, image in zip(controlnets, control_images):
-        if controlnet == "canny" or controlnet == "canny_xs" or controlnet == "canny_fl":
+    control_images = resize_images(controlnet_config.control_images, height, width, resize_mode)
+    for controlnet, image in zip(controlnet_config.controlnets, control_images):
+        if controlnet == "canny":
             processor = Processor('canny')
-        elif controlnet == "depth" or controlnet == "depth_xs" or controlnet == "depth_fl":
+        elif controlnet == "depth":
             processor = Processor('depth_midas')
-        elif controlnet == "pose" or controlnet == "pose_fl":
+        elif controlnet == "pose":
             processor = Processor('openpose_full')
-        elif controlnet == "scribble":
-            processor = Processor('scribble')
         else:
             raise ValueError(f"Invalid Controlnet: {controlnet}")
 
@@ -353,72 +226,25 @@ def get_controlnet_images(controlnets: List[str], control_images: List[Image.Ima
     return response_images
 
 
-def check_image_safety(images: List[Image.Image]):
-    safety_checker_input = feature_extractor(images, return_tensors="pt").to("cuda")
-    has_nsfw_concepts = safety_checker(
-        images=[images],
-        clip_input=safety_checker_input.pixel_values.to("cuda"),
-    )
-
-    return has_nsfw_concepts[1]
-
-
-def get_prompt_attention(pipeline, prompt, negative_prompt):
-    if isinstance(pipeline, (FluxPipeline, FluxImg2ImgPipeline, FluxInpaintPipeline, FluxControlNetPipeline)):
-        prompt_embeds, pooled_prompt_embeds = get_weighted_text_embeddings_flux1(pipeline, prompt)
-        return prompt_embeds, None, pooled_prompt_embeds, None
-    elif isinstance(pipeline, StableDiffusionXLPipeline):
-        prompt_embeds, prompt_neg_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds = get_weighted_text_embeddings_sdxl(pipeline, prompt, negative_prompt)
-        return prompt_embeds, prompt_neg_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
-    else:
-        raise ValueError(f"Invalid pipeline type: {type(pipeline)}")
-
-
-def get_photomaker_images(photomaker_images: List[Image.Image], height: int, width: int, resize_mode: str):
-    image_input_ids = []
-    image_id_embeds = []
-    photomaker_images = resize_images(photomaker_images, height, width, resize_mode)
-
-    for image in photomaker_images:
-        image_input_ids.append(img)
-        img = np.array(image)[:, :, ::-1]
-        faces = analyze_faces(face_detector, image)
-        if len(faces) > 0:
-            image_id_embeds.append(torch.from_numpy(faces[0]['embeddings']))
-        else:
-            raise ValueError("No face detected in the image")
-
-    return image_input_ids, image_id_embeds
+def get_prompt_attention(pipeline, prompt):
+    return get_weighted_text_embeddings_flux1(pipeline, prompt)
 
 
-def cleanup(pipeline, loras = None, embeddings = None):
+def cleanup(pipeline, loras = None):
     if loras:
-        pipeline.disable_lora()
         pipeline.unload_lora_weights()
-    if embeddings:
-        pipeline.unload_textual_inversion()
     gc.collect()
     torch.cuda.empty_cache()
 
 
-# Gen function
-def gen_img(
-    request: SDReq | SDImg2ImgReq | SDInpaintReq
-):
-    pipeline_args = get_pipe(request)
-    pipeline = pipeline_args['pipeline']
+# Gen Function
+def gen_img(request: FluxReq | FluxImg2ImgReq | FluxInpaintReq):
+    pipe_args = get_pipe(request)
+    pipeline = pipe_args["pipeline"]
     try:
-        pipeline.scheduler = load_scheduler(pipeline, request.scheduler)
+        positive_prompt_embeds, positive_prompt_pooled = get_prompt_attention(pipeline, request.prompt)
 
-        load_loras(pipeline, request.loras, request.fast_generation)
-        load_xl_embeddings(pipeline, request.embeddings)
-
-        control_images = get_controlnet_images(request.controlnet_config.controlnets, request.controlnet_config.control_images, request.height, request.width, request.resize_mode) if request.controlnet_config else None
-        photomaker_images, photomaker_id_embeds = get_photomaker_images(request.photomaker_images, request.height, request.width) if request.photomaker_images else (None, None)
-
-        positive_prompt_embeds, negative_prompt_embeds, positive_prompt_pooled, negative_prompt_pooled = get_prompt_attention(pipeline, request.prompt, request.negative_prompt)
-
-        # Common args
+        # Common Args
         args = {
             'prompt_embeds': positive_prompt_embeds,
             'pooled_prompt_embeds': positive_prompt_pooled,
@@ -430,54 +256,32 @@ def gen_img(
             'generator': [torch.Generator(device=device).manual_seed(request.seed + i) if not request.seed is any([None, 0, -1]) else torch.Generator(device=device).manual_seed(random.randint(0, 2**32 - 1)) for i in range(request.num_images_per_prompt)],
         }
 
-        if isinstance(pipeline, any([StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline,
-                                     StableDiffusionXLControlNetPipeline, StableDiffusionXLControlNetImg2ImgPipeline, StableDiffusionXLControlNetInpaintPipeline])):
-            args['clip_skip'] = request.clip_skip
-            args['negative_prompt_embeds'] = negative_prompt_embeds
-            args['negative_pooled_prompt_embeds'] = negative_prompt_pooled
-
-        if isinstance(pipeline, FluxControlNetPipeline) and request.controlnet_config:
-            args['control_mode'] = pipeline_args['control_mode']
-            args['control_image'] = control_images
-            args['controlnet_conditioning_scale'] = request.controlnet_conditioning_scale
-
-        if not isinstance(pipeline, FluxControlNetPipeline) and request.controlnet_config:
-            args['controlnet_conditioning_scale'] = request.controlnet_conditioning_scale
+        if request.controlnet_config:
+            args['control_mode'] = get_control_mode(request.controlnet_config)
+            args['control_images'] = get_controlnet_images(request.controlnet_config, request.height, request.width, request.resize_mode)
+            args['controlnet_conditioning_scale'] = request.controlnet_config.controlnet_conditioning_scale
 
-        if isinstance(request, SDReq):
-            args['image'] = control_images
-        elif isinstance(request, (SDImg2ImgReq, SDInpaintReq)):
-            args['control_image'] = control_images
-
-        if request.photomaker_images and isinstance(pipeline, any([PhotoMakerStableDiffusionXLPipeline, PhotoMakerStableDiffusionXLControlNetPipeline])):
-            args['input_id_images'] = photomaker_images
-            args['input_id_embeds'] = photomaker_id_embeds
-            args['start_merge_step'] = 10
-
-        if isinstance(request, SDImg2ImgReq):
-            args['image'] = resize_images([request.image], request.height, request.width, request.resize_mode)
-            args['strength'] = request.strength
-        elif isinstance(request, SDInpaintReq):
-            args['image'] = resize_images([request.image], request.height, request.width, request.resize_mode)
-            args['mask_image'] = resize_images([request.mask_image], request.height, request.width, request.resize_mode)
+        if isinstance(request, (FluxImg2ImgReq, FluxInpaintReq)):
+            args['image'] = resize_images([request.image], request.height, request.width, request.resize_mode)[0]
             args['strength'] = request.strength
 
+        if isinstance(request, FluxInpaintReq):
+            args['mask_image'] = resize_images([request.mask_image], request.height, request.width, request.resize_mode)[0]
+
+        # Generate
         images = pipeline(**args).images
 
+        # Refiner
         if request.refiner:
-            images = refiner(
-                prompt=request.prompt,
-                num_inference_steps=40,
-                denoising_start=0.7,
-                image=images.images
-            ).images
+            images = refiner(image=images, prompt=request.prompt, num_inference_steps=40, denoising_start=0.7).images
 
-        cleanup(pipeline, request.loras, request.embeddings)
+        cleanup(pipeline, request.loras)
 
         return images
     except Exception as e:
-        cleanup(pipeline, request.loras, request.embeddings)
-        raise ValueError(f"Error generating image: {e}") from e
+        cleanup(pipeline, request.loras)
+        raise gr.Error(f"Error: {e}")
+
 
 
 # CSS
@@ -730,18 +534,16 @@ def generate_image(
         "vae": vae,
         "controlnet_config": None,
     }
-    base_args = SDReq(**base_args)
-
+    base_args = FluxReq(**base_args)
+
     if len(enabled_loras) > 0:
         base_args.loras = []
-        for enabled_lora, lora_slider in zip(enabled_loras, [lora_slider_0, lora_slider_1, lora_slider_2, lora_slider_3, lora_slider_4, lora_slider_5]):
-            if enabled_lora.get("repo_id", None):
-                base_args.loras.append(
-                    {
-                        "repo_id": enabled_lora["repo_id"],
-                        "weight": lora_slider
-                    }
-                )
+        for enabled_lora, slider in zip(enabled_loras, [lora_slider_0, lora_slider_1, lora_slider_2, lora_slider_3, lora_slider_4, lora_slider_5]):
+            if enabled_lora['repo_id']:
+                base_args.loras.append({
+                    "repo_id": enabled_lora['repo_id'],
+                    "weight": slider
+                })
 
     image = None
     mask_image = None
@@ -751,7 +553,7 @@ def generate_image(
         image = img2img_image
         strength = float(img2img_strength)
 
-        base_args = SDImg2ImgReq(
+        base_args = FluxImg2ImgReq(
            **base_args.__dict__,
            image=image,
            strength=strength
@@ -761,7 +563,7 @@ def generate_image(
         mask_image = inpaint_image['layers'][0] if image else None
         strength = float(inpaint_strength)
 
-        base_args = SDInpaintReq(
+        base_args = FluxInpaintReq(
            **base_args.__dict__,
            image=image,
            mask_image=mask_image,
@@ -775,27 +577,23 @@ def generate_image(
         )
 
     if canny_image:
-        base_args.controlnet_config.controlnets.append("canny_fl")
+        base_args.controlnet_config.controlnets.append("canny")
         base_args.controlnet_config.control_images.append(canny_image)
         base_args.controlnet_config.controlnet_conditioning_scale.append(float(canny_strength))
     if pose_image:
-        base_args.controlnet_config.controlnets.append("pose_fl")
+        base_args.controlnet_config.controlnets.append("pose")
         base_args.controlnet_config.control_images.append(pose_image)
         base_args.controlnet_config.controlnet_conditioning_scale.append(float(pose_strength))
    if depth_image:
-        base_args.controlnet_config.controlnets.append("depth_fl")
+        base_args.controlnet_config.controlnets.append("depth")
        base_args.controlnet_config.control_images.append(depth_image)
        base_args.controlnet_config.controlnet_conditioning_scale.append(float(depth_strength))
    else:
-        base_args = SDReq(**base_args.__dict__)
-
-    images = gen_img(base_args)
+        base_args = FluxReq(**base_args.__dict__)
 
-    return (
-        gr.update(
-            value=images,
-            interactive=True
-        )
+    return gr.update(
+        value=gen_img(base_args),
+        interactive=True
    )
 
 
 
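A note on the committed code: in both the old and the new `gen_img`, the `generator` list is built with `if not request.seed is any([None, 0, -1])`. Since `any([None, 0, -1])` evaluates to `True`, this actually tests `request.seed is True`, so the random-seed branch is effectively never taken for integer seeds. The new `get_pipe` also reads `request.lora['repo_id']` inside `for i, lora in enumerate(request.loras)`, where `lora['repo_id']` appears to be meant, and the updated file references `AutoencoderTiny` without importing it from diffusers. A minimal sketch of the apparent seeding intent (a hypothetical helper, not part of the commit):

    import random
    import torch

    def make_generators(seed, count, device="cuda"):
        # Treat None, 0, and -1 as "no fixed seed": draw a fresh random seed per image.
        if seed in (None, 0, -1):
            seeds = [random.randint(0, 2**32 - 1) for _ in range(count)]
        else:
            # Otherwise derive one deterministic generator per image from the base seed.
            seeds = [seed + i for i in range(count)]
        return [torch.Generator(device=device).manual_seed(s) for s in seeds]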
app3.py ADDED
@@ -0,0 +1,1018 @@
1
+ # Testing one file gradio app for zero gpu spaces not working as expected.
2
+ # Check here for the issue: https://huggingface.co/spaces/zero-gpu-explorers/README/discussions/106#66e278a396acd45223e0d00b
3
+
4
+ import os
5
+ import gc
6
+ import json
7
+ import random
8
+ from typing import List, Optional
9
+
10
+ import spaces
11
+ import gradio as gr
12
+ from huggingface_hub import ModelCard
13
+ import torch
14
+ import numpy as np
15
+ from pydantic import BaseModel
16
+ from PIL import Image
17
+ from diffusers import (
18
+ FluxPipeline,
19
+ FluxImg2ImgPipeline,
20
+ FluxInpaintPipeline,
21
+ FluxControlNetPipeline,
22
+ StableDiffusionXLPipeline,
23
+ StableDiffusionXLImg2ImgPipeline,
24
+ StableDiffusionXLInpaintPipeline,
25
+ StableDiffusionXLControlNetPipeline,
26
+ StableDiffusionXLControlNetImg2ImgPipeline,
27
+ StableDiffusionXLControlNetInpaintPipeline,
28
+ AutoPipelineForText2Image,
29
+ AutoPipelineForImage2Image,
30
+ AutoPipelineForInpainting,
31
+ DiffusionPipeline,
32
+ AutoencoderKL,
33
+ FluxControlNetModel,
34
+ FluxMultiControlNetModel,
35
+ ControlNetModel,
36
+ )
37
+ from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
38
+ from huggingface_hub import hf_hub_download
39
+ from transformers import CLIPFeatureExtractor
40
+ from photomaker import FaceAnalysis2
41
+ from diffusers.schedulers import *
42
+ from huggingface_hub import hf_hub_download
43
+ from safetensors.torch import load_file
44
+ from controlnet_aux.processor import Processor
45
+ from photomaker import (
46
+ PhotoMakerStableDiffusionXLPipeline,
47
+ PhotoMakerStableDiffusionXLControlNetPipeline,
48
+ analyze_faces
49
+ )
50
+ from sd_embed.embedding_funcs import get_weighted_text_embeddings_sdxl, get_weighted_text_embeddings_flux1
51
+
52
+
53
+ # Initialize System
54
+ os.system("pip install --upgrade pip")
55
+
56
+
57
+ def load_sd():
58
+ # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
59
+ device = "cuda" if torch.cuda.is_available() else "cpu"
60
+
61
+ # Models
62
+ models = [
63
+ {
64
+ "repo_id": "black-forest-labs/FLUX.1-dev",
65
+ "loader": "flux",
66
+ "compute_type": torch.bfloat16,
67
+ },
68
+ {
69
+ "repo_id": "SG161222/RealVisXL_V4.0",
70
+ "loader": "xl",
71
+ "compute_type": torch.float16,
72
+ }
73
+ ]
74
+
75
+ for model in models:
76
+ try:
77
+ model["pipeline"] = AutoPipelineForText2Image.from_pretrained(
78
+ model['repo_id'],
79
+ torch_dtype = model['compute_type'],
80
+ safety_checker = None,
81
+ variant = "fp16"
82
+ ).to(device)
83
+ model["pipeline"].enable_model_cpu_offload()
84
+ except:
85
+ model["pipeline"] = AutoPipelineForText2Image.from_pretrained(
86
+ model['repo_id'],
87
+ torch_dtype = model['compute_type'],
88
+ safety_checker = None
89
+ ).to(device)
90
+ model["pipeline"].enable_model_cpu_offload()
91
+
92
+
93
+ # VAE n Refiner
94
+ sdxl_vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16).to(device)
95
+ refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", vae=sdxl_vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16").to(device)
96
+ refiner.enable_model_cpu_offload()
97
+
98
+
99
+ # Safety Checker
100
+ safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker").to(device)
101
+ feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32", from_pt=True)
102
+
103
+
104
+ # Controlnets
105
+ controlnet_models = [
106
+ {
107
+ "repo_id": "xinsir/controlnet-depth-sdxl-1.0",
108
+ "name": "depth_xl",
109
+ "layers": ["depth"],
110
+ "loader": "xl",
111
+ "compute_type": torch.float16,
112
+ },
113
+ {
114
+ "repo_id": "xinsir/controlnet-canny-sdxl-1.0",
115
+ "name": "canny_xl",
116
+ "layers": ["canny"],
117
+ "loader": "xl",
118
+ "compute_type": torch.float16,
119
+ },
120
+ {
121
+ "repo_id": "xinsir/controlnet-openpose-sdxl-1.0",
122
+ "name": "openpose_xl",
123
+ "layers": ["pose"],
124
+ "loader": "xl",
125
+ "compute_type": torch.float16,
126
+ },
127
+ {
128
+ "repo_id": "xinsir/controlnet-scribble-sdxl-1.0",
129
+ "name": "scribble_xl",
130
+ "layers": ["scribble"],
131
+ "loader": "xl",
132
+ "compute_type": torch.float16,
133
+ },
134
+ {
135
+ "repo_id": "Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro",
136
+ "name": "flux1_union_pro",
137
+ "layers": ["canny_fl", "tile_fl", "depth_fl", "blur_fl", "pose_fl", "gray_fl", "low_quality_fl"],
138
+ "loader": "flux-multi",
139
+ "compute_type": torch.bfloat16,
140
+ }
141
+ ]
142
+
143
+ for controlnet in controlnet_models:
144
+ if controlnet["loader"] == "xl":
145
+ controlnet["controlnet"] = ControlNetModel.from_pretrained(
146
+ controlnet["repo_id"],
147
+ torch_dtype = controlnet['compute_type']
148
+ ).to(device)
149
+ elif controlnet["loader"] == "flux-multi":
150
+ controlnet["controlnet"] = FluxMultiControlNetModel([FluxControlNetModel.from_pretrained(
151
+ controlnet["repo_id"],
152
+ torch_dtype = controlnet['compute_type']
153
+ ).to(device)])
154
+ #TODO: Add support for flux only controlnet
155
+
156
+
157
+ # Face Detection (for PhotoMaker)
158
+ face_detector = FaceAnalysis2(providers=['CUDAExecutionProvider'], allowed_modules=['detection', 'recognition'])
159
+ face_detector.prepare(ctx_id=0, det_size=(640, 640))
160
+
161
+
162
+ # PhotoMaker V2 (for SDXL only)
163
+ photomaker_ckpt = hf_hub_download(repo_id="TencentARC/PhotoMaker-V2", filename="photomaker-v2.bin", repo_type="model")
164
+
165
+ return device, models, sdxl_vae, refiner, safety_checker, feature_extractor, controlnet_models, face_detector, photomaker_ckpt
166
+
167
+
168
+ device, models, sdxl_vae, refiner, safety_checker, feature_extractor, controlnet_models, face_detector, photomaker_ckpt = load_sd()
169
+
170
+
171
+ # Models
172
+ class ControlNetReq(BaseModel):
173
+ controlnets: List[str] # ["canny", "tile", "depth"]
174
+ control_images: List[Image.Image]
175
+ controlnet_conditioning_scale: List[float]
176
+
177
+ class Config:
178
+ arbitrary_types_allowed=True
179
+
180
+
181
+ class SDReq(BaseModel):
182
+ model: str = ""
183
+ prompt: str = ""
184
+ negative_prompt: Optional[str] = "black-forest-labs/FLUX.1-dev"
185
+ fast_generation: Optional[bool] = True
186
+ loras: Optional[list] = []
187
+ embeddings: Optional[list] = []
188
+ resize_mode: Optional[str] = "resize_and_fill" # resize_only, crop_and_resize, resize_and_fill
189
+ scheduler: Optional[str] = "euler_fl"
190
+ height: int = 1024
191
+ width: int = 1024
192
+ num_images_per_prompt: int = 1
193
+ num_inference_steps: int = 8
194
+ guidance_scale: float = 3.5
195
+ seed: Optional[int] = 0
196
+ refiner: bool = False
197
+ vae: bool = True
198
+ controlnet_config: Optional[ControlNetReq] = None
199
+ photomaker_images: Optional[List[Image.Image]] = None
200
+
201
+ class Config:
202
+ arbitrary_types_allowed=True
203
+
204
+
205
+ class SDImg2ImgReq(SDReq):
206
+ image: Image.Image
207
+ strength: float = 1.0
208
+
209
+ class Config:
210
+ arbitrary_types_allowed=True
211
+
212
+
213
+ class SDInpaintReq(SDImg2ImgReq):
214
+ mask_image: Image.Image
215
+
216
+ class Config:
217
+ arbitrary_types_allowed=True
218
+
219
+
220
+ # Helper functions
221
+ def get_controlnet(controlnet_config: ControlNetReq):
222
+ control_mode = []
223
+ controlnet = []
224
+
225
+ for m in controlnet_models:
226
+ for c in controlnet_config.controlnets:
227
+ if c in m["layers"]:
228
+ control_mode.append(m["layers"].index(c))
229
+ controlnet.append(m["controlnet"])
230
+
231
+ return controlnet, control_mode
232
+
233
+
234
+ def get_pipe(request: SDReq | SDImg2ImgReq | SDInpaintReq):
235
+ for m in models:
236
+ if m["repo_id"] == request.model:
237
+ pipeline = m['pipeline']
238
+ controlnet, control_mode = get_controlnet(request.controlnet_config) if request.controlnet_config else (None, None)
239
+
240
+ pipe_args = {
241
+ "pipeline": pipeline,
242
+ "control_mode": control_mode,
243
+ }
244
+ if request.controlnet_config:
245
+ pipe_args["controlnet"] = controlnet
246
+
247
+ if not request.photomaker_images:
248
+ if isinstance(request, SDReq):
249
+ pipe_args['pipeline'] = AutoPipelineForText2Image.from_pipe(**pipe_args)
250
+ elif isinstance(request, SDImg2ImgReq):
251
+ pipe_args['pipeline'] = AutoPipelineForImage2Image.from_pipe(**pipe_args)
252
+ elif isinstance(request, SDInpaintReq):
253
+ pipe_args['pipeline'] = AutoPipelineForInpainting.from_pipe(**pipe_args)
254
+ else:
255
+ raise ValueError(f"Unknown request type: {type(request)}")
256
+ elif isinstance(request, any([PhotoMakerStableDiffusionXLPipeline, PhotoMakerStableDiffusionXLControlNetPipeline])):
257
+ if request.controlnet_config:
258
+ pipe_args['pipeline'] = PhotoMakerStableDiffusionXLControlNetPipeline.from_pipe(**pipe_args)
259
+ else:
260
+ pipe_args['pipeline'] = PhotoMakerStableDiffusionXLPipeline.from_pipe(**pipe_args)
261
+ else:
262
+ raise ValueError(f"Invalid request type: {type(request)}")
263
+
264
+ return pipe_args
265
+
266
+
267
+ def load_scheduler(pipeline, scheduler):
268
+ schedulers = {
269
+ "dpmpp_2m": (DPMSolverMultistepScheduler, {}),
270
+ "dpmpp_2m_k": (DPMSolverMultistepScheduler, {"use_karras_sigmas": True}),
271
+ "dpmpp_2m_sde": (DPMSolverMultistepScheduler, {"algorithm_type": "sde-dpmsolver++"}),
272
+ "dpmpp_2m_sde_k": (DPMSolverMultistepScheduler, {"algorithm_type": "sde-dpmsolver++", "use_karras_sigmas": True}),
273
+ "dpmpp_sde": (DPMSolverSinglestepScheduler, {}),
274
+ "dpmpp_sde_k": (DPMSolverSinglestepScheduler, {"use_karras_sigmas": True}),
275
+ "dpm2": (KDPM2DiscreteScheduler, {}),
276
+ "dpm2_k": (KDPM2DiscreteScheduler, {"use_karras_sigmas": True}),
277
+ "dpm2_a": (KDPM2AncestralDiscreteScheduler, {}),
278
+ "dpm2_a_k": (KDPM2AncestralDiscreteScheduler, {"use_karras_sigmas": True}),
279
+ "euler": (EulerDiscreteScheduler, {}),
280
+ "euler_a": (EulerAncestralDiscreteScheduler, {}),
281
+ "heun": (HeunDiscreteScheduler, {}),
282
+ "lms": (LMSDiscreteScheduler, {}),
283
+ "lms_k": (LMSDiscreteScheduler, {"use_karras_sigmas": True}),
284
+ "deis": (DEISMultistepScheduler, {}),
285
+ "unipc": (UniPCMultistepScheduler, {}),
286
+ "fm_euler": (FlowMatchEulerDiscreteScheduler, {}),
287
+ }
288
+ scheduler_class, kwargs = schedulers.get(scheduler, (None, {}))
289
+
290
+ if scheduler_class is not None:
291
+ scheduler = scheduler_class.from_config(pipeline.scheduler.config, **kwargs)
292
+ else:
293
+ raise ValueError(f"Unknown scheduler: {scheduler}")
294
+
295
+ return scheduler
296
+
297
+
298
+ def load_loras(pipeline, loras, fast_generation):
299
+ for i, lora in enumerate(loras):
300
+ pipeline.load_lora_weights(lora['repo_id'], adapter_name=f"lora_{i}")
301
+ adapter_names = [f"lora_{i}" for i in range(len(loras))]
302
+ adapter_weights = [lora['weight'] for lora in loras]
303
+
304
+ if fast_generation:
305
+ hyper_lora = hf_hub_download(
306
+ "ByteDance/Hyper-SD",
307
+ "Hyper-FLUX.1-dev-8steps-lora.safetensors" if isinstance(pipeline, FluxPipeline) else "Hyper-SDXL-2steps-lora.safetensors"
308
+ )
309
+ hyper_weight = 0.125 if isinstance(pipeline, FluxPipeline) else 1.0
310
+ pipeline.load_lora_weights(hyper_lora, adapter_name="hyper_lora")
311
+ adapter_names.append("hyper_lora")
312
+ adapter_weights.append(hyper_weight)
313
+
314
+ pipeline.set_adapters(adapter_names, adapter_weights)
315
+
316
+
317
+ def load_xl_embeddings(pipeline, embeddings):
318
+ for embedding in embeddings:
319
+ state_dict = load_file(hf_hub_download(embedding['repo_id']))
320
+ pipeline.load_textual_inversion(state_dict['clip_g'], token=embedding['token'], text_encoder=pipeline.text_encoder_2, tokenizer=pipeline.tokenizer_2)
321
+ pipeline.load_textual_inversion(state_dict["clip_l"], token=embedding['token'], text_encoder=pipeline.text_encoder, tokenizer=pipeline.tokenizer)
322
+
323
+
324
+ def resize_images(images: List[Image.Image], height: int, width: int, resize_mode: str):
325
+ for image in images:
326
+ if resize_mode == "resize_only":
327
+ image = image.resize((width, height))
328
+ elif resize_mode == "crop_and_resize":
329
+ image = image.crop((0, 0, width, height))
330
+ elif resize_mode == "resize_and_fill":
331
+ image = image.resize((width, height), Image.Resampling.LANCZOS)
332
+
333
+ return images
334
+
335
+
336
+ def get_controlnet_images(controlnets: List[str], control_images: List[Image.Image], height: int, width: int, resize_mode: str):
337
+ response_images = []
338
+ control_images = resize_images(control_images, height, width, resize_mode)
339
+ for controlnet, image in zip(controlnets, control_images):
340
+ if controlnet == "canny" or controlnet == "canny_xs" or controlnet == "canny_fl":
341
+ processor = Processor('canny')
342
+ elif controlnet == "depth" or controlnet == "depth_xs" or controlnet == "depth_fl":
343
+ processor = Processor('depth_midas')
344
+ elif controlnet == "pose" or controlnet == "pose_fl":
345
+ processor = Processor('openpose_full')
346
+ elif controlnet == "scribble":
347
+ processor = Processor('scribble')
348
+ else:
349
+ raise ValueError(f"Invalid Controlnet: {controlnet}")
350
+
351
+ response_images.append(processor(image, to_pil=True))
352
+
353
+ return response_images
354
+
355
+
356
+ def check_image_safety(images: List[Image.Image]):
357
+ safety_checker_input = feature_extractor(images, return_tensors="pt").to("cuda")
358
+ has_nsfw_concepts = safety_checker(
359
+ images=[images],
360
+ clip_input=safety_checker_input.pixel_values.to("cuda"),
361
+ )
362
+
363
+ return has_nsfw_concepts[1]
364
+
365
+
366
+ def get_prompt_attention(pipeline, prompt, negative_prompt):
367
+ if isinstance(pipeline, (FluxPipeline, FluxImg2ImgPipeline, FluxInpaintPipeline, FluxControlNetPipeline)):
368
+ prompt_embeds, pooled_prompt_embeds = get_weighted_text_embeddings_flux1(pipeline, prompt)
369
+ return prompt_embeds, None, pooled_prompt_embeds, None
370
+ elif isinstance(pipeline, StableDiffusionXLPipeline):
371
+ prompt_embeds, prompt_neg_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds = get_weighted_text_embeddings_sdxl(pipeline, prompt, negative_prompt)
372
+ return prompt_embeds, prompt_neg_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
373
+ else:
374
+ raise ValueError(f"Invalid pipeline type: {type(pipeline)}")
375
+
376
+
377
+ def get_photomaker_images(photomaker_images: List[Image.Image], height: int, width: int, resize_mode: str):
378
+ image_input_ids = []
379
+ image_id_embeds = []
380
+ photomaker_images = resize_images(photomaker_images, height, width, resize_mode)
381
+
382
+ for image in photomaker_images:
383
+ image_input_ids.append(img)
384
+ img = np.array(image)[:, :, ::-1]
385
+ faces = analyze_faces(face_detector, image)
386
+ if len(faces) > 0:
387
+ image_id_embeds.append(torch.from_numpy(faces[0]['embeddings']))
388
+ else:
389
+ raise ValueError("No face detected in the image")
390
+
391
+ return image_input_ids, image_id_embeds
392
+
393
+
394
+ def cleanup(pipeline, loras = None, embeddings = None):
395
+ if loras:
396
+ pipeline.disable_lora()
397
+ pipeline.unload_lora_weights()
398
+ if embeddings:
399
+ pipeline.unload_textual_inversion()
400
+ gc.collect()
401
+ torch.cuda.empty_cache()
402
+
403
+
404
+ # Gen function
405
+ def gen_img(
406
+ request: SDReq | SDImg2ImgReq | SDInpaintReq
407
+ ):
408
+ pipeline_args = get_pipe(request)
409
+ pipeline = pipeline_args['pipeline']
410
+ try:
411
+ pipeline.scheduler = load_scheduler(pipeline, request.scheduler)
412
+
413
+ load_loras(pipeline, request.loras, request.fast_generation)
414
+ load_xl_embeddings(pipeline, request.embeddings)
415
+
416
+ control_images = get_controlnet_images(request.controlnet_config.controlnets, request.controlnet_config.control_images, request.height, request.width, request.resize_mode) if request.controlnet_config else None
417
+ photomaker_images, photomaker_id_embeds = get_photomaker_images(request.photomaker_images, request.height, request.width) if request.photomaker_images else (None, None)
418
+
419
+ positive_prompt_embeds, negative_prompt_embeds, positive_prompt_pooled, negative_prompt_pooled = get_prompt_attention(pipeline, request.prompt, request.negative_prompt)
420
+
421
+ # Common args
422
+ args = {
423
+ 'prompt_embeds': positive_prompt_embeds,
424
+ 'pooled_prompt_embeds': positive_prompt_pooled,
425
+ 'height': request.height,
426
+ 'width': request.width,
427
+ 'num_images_per_prompt': request.num_images_per_prompt,
428
+ 'num_inference_steps': request.num_inference_steps,
429
+ 'guidance_scale': request.guidance_scale,
430
+ 'generator': [torch.Generator(device=device).manual_seed(request.seed + i) if not request.seed is any([None, 0, -1]) else torch.Generator(device=device).manual_seed(random.randint(0, 2**32 - 1)) for i in range(request.num_images_per_prompt)],
431
+ }
432
+
433
+ if isinstance(pipeline, any([StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline,
434
+ StableDiffusionXLControlNetPipeline, StableDiffusionXLControlNetImg2ImgPipeline, StableDiffusionXLControlNetInpaintPipeline])):
435
+ args['clip_skip'] = request.clip_skip
436
+ args['negative_prompt_embeds'] = negative_prompt_embeds
437
+ args['negative_pooled_prompt_embeds'] = negative_prompt_pooled
438
+
439
+ if isinstance(pipeline, FluxControlNetPipeline) and request.controlnet_config:
440
+ args['control_mode'] = pipeline_args['control_mode']
441
+ args['control_image'] = control_images
442
+ args['controlnet_conditioning_scale'] = request.controlnet_conditioning_scale
443
+
444
+ if not isinstance(pipeline, FluxControlNetPipeline) and request.controlnet_config:
445
+ args['controlnet_conditioning_scale'] = request.controlnet_conditioning_scale
446
+
447
+ if isinstance(request, SDReq):
448
+ args['image'] = control_images
449
+ elif isinstance(request, (SDImg2ImgReq, SDInpaintReq)):
450
+ args['control_image'] = control_images
451
+
452
+ if request.photomaker_images and isinstance(pipeline, any([PhotoMakerStableDiffusionXLPipeline, PhotoMakerStableDiffusionXLControlNetPipeline])):
453
+ args['input_id_images'] = photomaker_images
454
+ args['input_id_embeds'] = photomaker_id_embeds
455
+ args['start_merge_step'] = 10
456
+
457
+ if isinstance(request, SDImg2ImgReq):
458
+ args['image'] = resize_images([request.image], request.height, request.width, request.resize_mode)
459
+ args['strength'] = request.strength
460
+ elif isinstance(request, SDInpaintReq):
461
+ args['image'] = resize_images([request.image], request.height, request.width, request.resize_mode)
462
+ args['mask_image'] = resize_images([request.mask_image], request.height, request.width, request.resize_mode)
463
+ args['strength'] = request.strength
464
+
465
+ images = pipeline(**args).images
466
+
467
+ if request.refiner:
468
+ images = refiner(
469
+ prompt=request.prompt,
470
+ num_inference_steps=40,
471
+ denoising_start=0.7,
472
+ image=images.images
473
+ ).images
474
+
475
+ cleanup(pipeline, request.loras, request.embeddings)
476
+
477
+ return images
478
+ except Exception as e:
479
+ cleanup(pipeline, request.loras, request.embeddings)
480
+ raise ValueError(f"Error generating image: {e}") from e
481
+
482
+
483
+ # CSS
484
+ css = """
485
+ @import url('https://fonts.googleapis.com/css2?family=Poppins:wght@300;400;600&display=swap');
486
+ body {
487
+ font-family: 'Poppins', sans-serif !important;
488
+ }
489
+ .center-content {
490
+ text-align: center;
491
+ max-width: 600px;
492
+ margin: 0 auto;
493
+ padding: 20px;
494
+ }
495
+ .center-content h1 {
496
+ font-weight: 600;
497
+ margin-bottom: 1rem;
498
+ }
499
+ .center-content p {
500
+ margin-bottom: 1.5rem;
501
+ }
502
+ """
503
+
504
+
505
+ flux_models = ["black-forest-labs/FLUX.1-dev"]
506
+ with open("data/images/loras/flux.json", "r") as f:
507
+ loras = json.load(f)
508
+
509
+
510
+ # Event functions
511
+ def update_fast_generation(model, fast_generation):
512
+ if fast_generation:
513
+ return (
514
+ gr.update(
515
+ value=3.5
516
+ ),
517
+ gr.update(
518
+ value=8
519
+ )
520
+ )
521
+
522
+
523
+ def selected_lora_from_gallery(evt: gr.SelectData):
524
+ return (
525
+ gr.update(
526
+ value=evt.index
527
+ )
528
+ )
529
+
530
+
531
+ def update_selected_lora(custom_lora):
532
+ link = custom_lora.split("/")
533
+
534
+ if len(link) == 2:
535
+ model_card = ModelCard.load(custom_lora)
536
+ trigger_word = model_card.data.get("instance_prompt", "")
537
+ image_url = f"""https://huggingface.co/{custom_lora}/resolve/main/{model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)}"""
538
+
539
+ custom_lora_info_css = """
540
+ <style>
541
+ .custom-lora-info {
542
+ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', sans-serif;
543
+ background: linear-gradient(135deg, #4a90e2, #7b61ff);
544
+ color: white;
545
+ padding: 16px;
546
+ border-radius: 8px;
547
+ box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
548
+ margin: 16px 0;
549
+ }
550
+ .custom-lora-header {
551
+ font-size: 18px;
552
+ font-weight: 600;
553
+ margin-bottom: 12px;
554
+ }
555
+ .custom-lora-content {
556
+ display: flex;
557
+ align-items: center;
558
+ background-color: rgba(255, 255, 255, 0.1);
559
+ border-radius: 6px;
560
+ padding: 12px;
561
+ }
562
+ .custom-lora-image {
563
+ width: 80px;
564
+ height: 80px;
565
+ object-fit: cover;
566
+ border-radius: 6px;
567
+ margin-right: 16px;
568
+ }
569
+ .custom-lora-text h3 {
570
+ margin: 0 0 8px 0;
571
+ font-size: 16px;
572
+ font-weight: 600;
573
+ }
574
+ .custom-lora-text small {
575
+ font-size: 14px;
576
+ opacity: 0.9;
577
+ }
578
+ .custom-trigger-word {
579
+ background-color: rgba(255, 255, 255, 0.2);
580
+ padding: 2px 6px;
581
+ border-radius: 4px;
582
+ font-weight: 600;
583
+ }
584
+ </style>
585
+ """
586
+
587
+ custom_lora_info_html = f"""
588
+ <div class="custom-lora-info">
589
+ <div class="custom-lora-header">Custom LoRA: {custom_lora}</div>
590
+ <div class="custom-lora-content">
591
+ <img class="custom-lora-image" src="{image_url}" alt="LoRA preview">
592
+ <div class="custom-lora-text">
593
+ <h3>{link[1].replace("-", " ").replace("_", " ")}</h3>
594
+ <small>{"Using: <span class='custom-trigger-word'>"+trigger_word+"</span> as the trigger word" if trigger_word else "No trigger word found. If there's a trigger word, include it in your prompt"}</small>
595
+ </div>
596
+ </div>
597
+ </div>
598
+ """
599
+
600
+ custom_lora_info_html = f"{custom_lora_info_css}{custom_lora_info_html}"
601
+
602
+ return (
603
+ gr.update( # selected_lora
604
+ value=custom_lora,
605
+ ),
606
+ gr.update( # custom_lora_info
607
+ value=custom_lora_info_html,
608
+ visible=True
609
+ )
610
+ )
611
+
612
+ else:
613
+ return (
614
+ gr.update( # selected_lora
615
+ value=custom_lora,
616
+ ),
617
+ gr.update( # custom_lora_info
618
+ value=custom_lora_info_html if len(link) == 0 else "",
619
+ visible=False
620
+ )
621
+ )
622
+
623
+
624
+ def add_to_enabled_loras(model, selected_lora, enabled_loras):
+     lora_data = loras
+     try:
+         selected_lora = int(selected_lora)
+
+         if 0 <= selected_lora < len(lora_data):  # the index of the LoRA in the gallery
+             lora_info = lora_data[selected_lora]
+             enabled_loras.append({
+                 "repo_id": lora_info["repo"],
+                 "trigger_word": lora_info["trigger_word"]
+             })
+     except ValueError:  # not an index, so treat it as a custom "user/repo" path
+         link = selected_lora.split("/")
+         if len(link) == 2:
+             model_card = ModelCard.load(selected_lora)
+             trigger_word = model_card.data.get("instance_prompt", "")
+             enabled_loras.append({
+                 "repo_id": selected_lora,
+                 "trigger_word": trigger_word
+             })
+
+     return (
+         gr.update(  # selected_lora
+             value=""
+         ),
+         gr.update(  # custom_lora_info
+             value="",
+             visible=False
+         ),
+         gr.update(  # enabled_loras
+             value=enabled_loras
+         )
+     )
+
+
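+ # Sync the fixed bank of six slider/remove-button slots with the enabled LoRAs:
+ # one visible, labelled slot per enabled LoRA; the remaining slots are hidden.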
+ def update_lora_sliders(enabled_loras):
+     sliders = []
+     remove_buttons = []
+
+     for lora in enabled_loras:
+         sliders.append(
+             gr.update(
+                 label=lora.get("repo_id", ""),
+                 info=f"Trigger Word: {lora.get('trigger_word', '')}",
+                 visible=True,
+                 interactive=True
+             )
+         )
+         remove_buttons.append(
+             gr.update(
+                 visible=True,
+                 interactive=True
+             )
+         )
+
+     if len(sliders) < 6:
+         for i in range(len(sliders), 6):
+             sliders.append(gr.update(visible=False))
+             remove_buttons.append(gr.update(visible=False))
+
+     return *sliders, *remove_buttons
+
+
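+ # Drop a LoRA from the session state by its slot index.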
+ def remove_from_enabled_loras(enabled_loras, index):
+     enabled_loras.pop(index)
+     return gr.update(value=enabled_loras)
+
+
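+ # Build a request object from the UI state and hand it to gen_img(). Exactly one
+ # image mode applies, checked in order: img2img, then inpaint, then ControlNet
+ # (canny/pose/depth), falling back to plain text-to-image.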
+ @spaces.GPU
+ def generate_image(
+     model, prompt, negative_prompt, fast_generation, enabled_loras,
+     lora_slider_0, lora_slider_1, lora_slider_2, lora_slider_3, lora_slider_4, lora_slider_5,
+     img2img_image, inpaint_image, canny_image, pose_image, depth_image,
+     img2img_strength, inpaint_strength, canny_strength, pose_strength, depth_strength,
+     resize_mode,
+     scheduler, image_height, image_width, image_num_images_per_prompt,
+     image_num_inference_steps, image_guidance_scale, image_seed,
+     refiner, vae
+ ):
+     base_args = {
+         "model": model,
+         "prompt": prompt,
+         "negative_prompt": negative_prompt,
+         "fast_generation": fast_generation,
+         "loras": None,
+         "resize_mode": resize_mode,
+         "scheduler": scheduler,
+         "height": int(image_height),
+         "width": int(image_width),
+         "num_images_per_prompt": int(image_num_images_per_prompt),
+         "num_inference_steps": int(image_num_inference_steps),
+         "guidance_scale": float(image_guidance_scale),
+         "seed": int(image_seed),
+         "refiner": refiner,
+         "vae": vae,
+         "controlnet_config": None,
+     }
+     base_args = SDReq(**base_args)
+
+     if len(enabled_loras) > 0:
+         base_args.loras = []
+         for enabled_lora, lora_slider in zip(enabled_loras, [lora_slider_0, lora_slider_1, lora_slider_2, lora_slider_3, lora_slider_4, lora_slider_5]):
+             if enabled_lora.get("repo_id", None):
+                 base_args.loras.append(
+                     {
+                         "repo_id": enabled_lora["repo_id"],
+                         "weight": lora_slider
+                     }
+                 )
+
+     image = None
+     mask_image = None
+     strength = None
+
+     if img2img_image:
+         image = img2img_image
+         strength = float(img2img_strength)
+
+         base_args = SDImg2ImgReq(
+             **base_args.__dict__,
+             image=image,
+             strength=strength
+         )
+     elif inpaint_image:
+         # Treat an all-black editor background as "no image uploaded".
+         image = inpaint_image['background'] if not all(pixel == (0, 0, 0) for pixel in list(inpaint_image['background'].getdata())) else None
+         mask_image = inpaint_image['layers'][0] if image else None
+         strength = float(inpaint_strength)
+
+         base_args = SDInpaintReq(
+             **base_args.__dict__,
+             image=image,
+             mask_image=mask_image,
+             strength=strength
+         )
+     elif any([canny_image, pose_image, depth_image]):
+         base_args.controlnet_config = ControlNetReq(
+             controlnets=[],
+             control_images=[],
+             controlnet_conditioning_scale=[]
+         )
+
+         if canny_image:
+             base_args.controlnet_config.controlnets.append("canny_fl")
+             base_args.controlnet_config.control_images.append(canny_image)
+             base_args.controlnet_config.controlnet_conditioning_scale.append(float(canny_strength))
+         if pose_image:
+             base_args.controlnet_config.controlnets.append("pose_fl")
+             base_args.controlnet_config.control_images.append(pose_image)
+             base_args.controlnet_config.controlnet_conditioning_scale.append(float(pose_strength))
+         if depth_image:
+             base_args.controlnet_config.controlnets.append("depth_fl")
+             base_args.controlnet_config.control_images.append(depth_image)
+             base_args.controlnet_config.controlnet_conditioning_scale.append(float(depth_strength))
+     else:
+         base_args = SDReq(**base_args.__dict__)  # plain text-to-image
+
+     images = gen_img(base_args)
+
+     return gr.update(
+         value=images,
+         interactive=True
+     )
+
+
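+ # A commented sketch of calling the generator without the UI. This assumes SDReq
+ # accepts exactly the fields assembled in generate_image above; the values mirror
+ # the slider defaults further down:
+ #
+ # req = SDReq(
+ #     model=flux_models[0], prompt="a watercolor fox", negative_prompt="",
+ #     fast_generation=False, loras=None, resize_mode="resize and fill",
+ #     scheduler="fm_euler", height=1024, width=1024, num_images_per_prompt=1,
+ #     num_inference_steps=20, guidance_scale=3.5, seed=0,
+ #     refiner=False, vae=True, controlnet_config=None,
+ # )
+ # images = gen_img(req)
+
+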
+ # Main Gradio app
+ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
+     # Header
+     with gr.Column(elem_classes="center-content"):
+         gr.Markdown("""
+         # 🚀 AAI: All AI
+         Unleash your creativity with our multi-modal AI platform.
+         [![Sync code to HF Space](https://github.com/mantrakp04/aai/actions/workflows/hf-space.yml/badge.svg)](https://github.com/mantrakp04/aai/actions/workflows/hf-space.yml)
+         """)
+
+     # Tabs
+     with gr.Tabs():
+         with gr.Tab(label="🖼️ Image"):
+             with gr.Tabs():
+                 with gr.Tab("Flux"):
+                     """
+                     Create the image tab for generative image models.
+
+                     Args:
+                         models: list
+                             A list containing the model repository paths
+                         gap_iol, gap_la, gap_le, gap_eio, gap_io: Optional[List[dict]]
+                             Lists of dictionaries, each carrying the title and component
+                             for a custom Gradio component. Example:
+                                 def gr_comp():
+                                     gr.Label("Hello World")
+
+                                 [
+                                     {
+                                         'title': "Title",
+                                         'component': gr_comp()
+                                     }
+                                 ]
+                         loras: list
+                             A list of dictionaries containing the image and title for the
+                             LoRA gallery, generally loaded from a JSON file in the data
+                             folder (see the sketch just below this docstring).
+                     """
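+                     # A sketch of the expected `loras` entry shape (field names inferred
+                     # from how the gallery and add_to_enabled_loras use them; the JSON in
+                     # the data folder may carry additional keys):
+                     #
+                     # [
+                     #     {
+                     #         "image": "https://example.com/preview.png",  # gallery thumbnail
+                     #         "title": "My Style LoRA",                    # gallery caption
+                     #         "repo": "user/my-flux-lora",                 # Hugging Face repo id
+                     #         "trigger_word": "mystyle"                    # shown in the slider info
+                     #     }
+                     # ]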
+                     def process_gaps(gaps: List[dict]):
+                         for gap in gaps:
+                             with gr.Accordion(gap['title']):
+                                 gap['component']
+
+                     with gr.Row():
+                         with gr.Column():
+                             with gr.Group():
+                                 model = gr.Dropdown(label="Models", choices=flux_models, value=flux_models[0], interactive=True)
+                                 prompt = gr.Textbox(lines=5, label="Prompt")
+                                 negative_prompt = gr.Textbox(label="Negative Prompt")
+                                 fast_generation = gr.Checkbox(label="Fast Generation (Hyper-SD) 🧪")
+
+                             with gr.Accordion("Loras", open=True):  # LoRA gallery
+                                 lora_gallery = gr.Gallery(
+                                     label="Gallery",
+                                     value=[(lora['image'], lora['title']) for lora in loras],
+                                     allow_preview=False,
+                                     columns=[3],
+                                     type="pil"
+                                 )
+
+                                 with gr.Group():
+                                     with gr.Column():
+                                         with gr.Row():
+                                             custom_lora = gr.Textbox(label="Custom LoRA", info="Enter a Hugging Face repo path")
+                                             selected_lora = gr.Textbox(label="Selected LoRA", info="Choose from the gallery or enter a custom LoRA")
+
+                                         custom_lora_info = gr.HTML(visible=False)
+                                         add_lora = gr.Button(value="Add LoRA")
+
+                                 enabled_loras = gr.State(value=[])
+                                 with gr.Group():
+                                     with gr.Row():
+                                         for i in range(6):  # support at most 6 LoRAs to keep inference time reasonable
+                                             with gr.Column():
+                                                 with gr.Column(scale=2):
+                                                     globals()[f"lora_slider_{i}"] = gr.Slider(label=f"LoRA {i+1}", minimum=0, maximum=1, step=0.01, value=0.8, visible=False, interactive=True)
+                                                 with gr.Column():
+                                                     globals()[f"lora_remove_{i}"] = gr.Button(value="Remove LoRA", visible=False)
+
+                             with gr.Accordion("Embeddings", open=False):  # Embeddings
+                                 gr.Label("To be implemented")
+
+                             with gr.Accordion("Image Options"):  # Image options
+                                 with gr.Tabs():
+                                     image_options = {
+                                         "img2img": "Upload Image",
+                                         "inpaint": "Upload Image",
+                                         "canny": "Upload Image",
+                                         "pose": "Upload Image",
+                                         "depth": "Upload Image",
+                                     }
+
+                                     for image_option, label in image_options.items():
+                                         with gr.Tab(image_option):
+                                             if image_option not in ('inpaint', 'scribble'):
+                                                 globals()[f"{image_option}_image"] = gr.Image(label=label, type="pil")
+                                             else:
+                                                 globals()[f"{image_option}_image"] = gr.ImageEditor(
+                                                     label=label,
+                                                     image_mode='RGB',
+                                                     layers=False,
+                                                     brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed") if image_option == 'inpaint' else gr.Brush(),
+                                                     interactive=True,
+                                                     type="pil",
+                                                 )
+
+                                             # Image strength (the ControlNet conditioning scale, or the denoising strength for img2img and inpaint)
+                                             globals()[f"{image_option}_strength"] = gr.Slider(label="Strength", minimum=0, maximum=1, step=0.01, value=1.0, interactive=True)
+
+                                     resize_mode = gr.Radio(
+                                         label="Resize Mode",
+                                         choices=["crop and resize", "resize only", "resize and fill"],
+                                         value="resize and fill",
+                                         interactive=True
+                                     )
+
+                         with gr.Column():
+                             with gr.Group():
+                                 output_images = gr.Gallery(
+                                     label="Output Images",
+                                     value=[],
+                                     allow_preview=True,
+                                     type="pil",
+                                     interactive=False,
+                                 )
+                                 generate_images = gr.Button(value="Generate Images", variant="primary")
+
+                             with gr.Accordion("Advanced Settings", open=True):
+                                 with gr.Row():
+                                     scheduler = gr.Dropdown(
+                                         label="Scheduler",
+                                         choices=[
+                                             "fm_euler"
+                                         ],
+                                         value="fm_euler",
+                                         interactive=True
+                                     )
+
+                                 with gr.Row():
+                                     # Distribute the option sliders across two columns, alternating.
+                                     for column in range(2):
+                                         with gr.Column():
+                                             options = [
+                                                 ("Height", "image_height", 64, 1024, 64, 1024, True),
+                                                 ("Width", "image_width", 64, 1024, 64, 1024, True),
+                                                 ("Num Images Per Prompt", "image_num_images_per_prompt", 1, 4, 1, 1, True),
+                                                 ("Num Inference Steps", "image_num_inference_steps", 1, 100, 1, 20, True),
+                                                 ("Clip Skip", "image_clip_skip", 0, 2, 1, 2, False),
+                                                 ("Guidance Scale", "image_guidance_scale", 0, 20, 0.5, 3.5, True),
+                                                 ("Seed", "image_seed", 0, 100000, 1, random.randint(0, 100000), True),
+                                             ]
+                                             for label, var_name, min_val, max_val, step, value, visible in options[column::2]:
+                                                 globals()[var_name] = gr.Slider(label=label, minimum=min_val, maximum=max_val, step=step, value=value, visible=visible, interactive=True)
+
+                                 with gr.Row():
+                                     refiner = gr.Checkbox(
+                                         label="Refiner 🧪",
+                                         value=False,
+                                     )
+                                     vae = gr.Checkbox(
+                                         label="VAE",
+                                         value=True,
+                                     )
+
+                     # Events
+                     # Base options
+                     fast_generation.change(update_fast_generation, [model, fast_generation], [image_guidance_scale, image_num_inference_steps])  # Fast Generation  # type: ignore
+
+                     # LoRA gallery
+                     lora_gallery.select(selected_lora_from_gallery, None, selected_lora)
+                     # update_selected_lora returns updates for (selected_lora, custom_lora_info), in that order.
+                     custom_lora.change(update_selected_lora, custom_lora, [selected_lora, custom_lora_info])
+                     add_lora.click(add_to_enabled_loras, [model, selected_lora, enabled_loras], [selected_lora, custom_lora_info, enabled_loras])
+                     enabled_loras.change(update_lora_sliders, enabled_loras, [lora_slider_0, lora_slider_1, lora_slider_2, lora_slider_3, lora_slider_4, lora_slider_5, lora_remove_0, lora_remove_1, lora_remove_2, lora_remove_3, lora_remove_4, lora_remove_5])  # type: ignore
+
+                     # Bind the loop variable via a default argument so each button removes
+                     # its own slot (a late-binding closure would always see i == 5).
+                     for i in range(6):
+                         globals()[f"lora_remove_{i}"].click(
+                             lambda enabled_loras, index=i: remove_from_enabled_loras(enabled_loras, index),
+                             [enabled_loras],
+                             [enabled_loras]
+                         )
+
+                     # Generate images
+                     generate_images.click(
+                         generate_image,  # type: ignore
+                         [
+                             model, prompt, negative_prompt, fast_generation, enabled_loras,
+                             lora_slider_0, lora_slider_1, lora_slider_2, lora_slider_3, lora_slider_4, lora_slider_5,  # type: ignore
+                             img2img_image, inpaint_image, canny_image, pose_image, depth_image,  # type: ignore
+                             img2img_strength, inpaint_strength, canny_strength, pose_strength, depth_strength,  # type: ignore
+                             resize_mode,
+                             scheduler, image_height, image_width, image_num_images_per_prompt,  # type: ignore
+                             image_num_inference_steps, image_guidance_scale, image_seed,  # type: ignore
+                             refiner, vae
+                         ],
+                         [output_images]
+                     )
+                 with gr.Tab("SDXL"):
+                     gr.Label("To be implemented")
+         with gr.Tab(label="🎵 Audio"):
+             gr.Label("Coming soon!")
+         with gr.Tab(label="🎬 Video"):
+             gr.Label("Coming soon!")
+         with gr.Tab(label="📄 Text"):
+             gr.Label("Coming soon!")
+
+
+ demo.launch(
+     share=False,
+     debug=True,
+ )