forplaytvplus committed
Commit d49eeee · verified · 1 Parent(s): b076820

Update app.py

Files changed (1): app.py (+190 −45)
app.py CHANGED
@@ -10,22 +10,27 @@ import gradio as gr
 import numpy as np
 import spaces
 import torch
+import cv2
 from PIL import Image
 from io import BytesIO
-from diffusers import AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image
+from diffusers.utils import load_image
+from diffusers import StableDiffusionXLControlNetPipeline, StableDiffusionXLControlNetInpaintPipeline, ControlNetModel, AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image, AutoPipelineForInpainting, UNet2DConditionModel
+from controlnet_aux import HEDdetector
 
 DESCRIPTION = "# Run any LoRA or SD Model"
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>⚠️ This space is running on the CPU. This demo doesn't work on CPU 😞! Run on a GPU by duplicating this space or test our website for free and unlimited by <a href='https://squaadai.com'>clicking here</a>, which provides these and more options.</p>"
 
 MAX_SEED = np.iinfo(np.int32).max
-CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1824"))
 USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
 ENABLE_USE_LORA = os.getenv("ENABLE_USE_LORA", "1") == "1"
+ENABLE_USE_LORA2 = os.getenv("ENABLE_USE_LORA2", "1") == "1"
 ENABLE_USE_VAE = os.getenv("ENABLE_USE_VAE", "1") == "1"
-ENABLE_USE_IMG2IMG = os.getenv("ENABLE_USE_VAE", "1") == "1"
+ENABLE_USE_IMG2IMG = os.getenv("ENABLE_USE_IMG2IMG", "1") == "1"
+ENABLE_USE_CONTROLNET = os.getenv("ENABLE_USE_CONTROLNET", "1") == "1"
+ENABLE_USE_CONTROLNETINPAINT = os.getenv("ENABLE_USE_CONTROLNETINPAINT", "1") == "1"
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
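Note: this hunk also fixes a pre-existing typo (ENABLE_USE_IMG2IMG previously read the ENABLE_USE_VAE variable), and the enlarged import block assumes two packages beyond the previous requirements. Whether requirements.txt was updated is not part of this commit, so the PyPI names below are an assumption:

    # assumed extra dependencies for the imports added above:
    #   pip install opencv-python controlnet-aux
    import cv2                               # used to build Canny edge maps
    from controlnet_aux import HEDdetector   # imported by this commit but not referenced yet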
 
@@ -34,10 +39,9 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
         seed = random.randint(0, MAX_SEED)
     return seed
 
-
 @spaces.GPU
 def generate(
-    prompt: str,
+    prompt: str = "",
     negative_prompt: str = "",
     prompt_2: str = "",
     negative_prompt_2: str = "",
@@ -49,40 +53,88 @@ def generate(
     height: int = 1024,
     guidance_scale_base: float = 5.0,
     num_inference_steps_base: int = 25,
+    controlnet_conditioning_scale: float = 1,
+    control_guidance_start: float = 0,
+    control_guidance_end: float = 1,
     strength_img2img: float = 0.7,
     use_vae: bool = False,
     use_lora: bool = False,
+    use_lora2: bool = False,
     model = 'stabilityai/stable-diffusion-xl-base-1.0',
     vaecall = 'madebyollin/sdxl-vae-fp16-fix',
     lora = '',
+    lora2 = '',
+    controlnet_model = 'diffusers/controlnet-canny-sdxl-1.0',
     lora_scale: float = 0.7,
+    lora_scale2: float = 0.7,
     use_img2img: bool = False,
+    use_controlnet: bool = False,
+    use_controlnetinpaint: bool = False,
     url = '',
+    controlnet_img = '',
+    controlnet_inpaint = '',
 ):
     if torch.cuda.is_available():
-
-        if not use_img2img:
-            pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
 
+        if not use_img2img:
+            pipe = DiffusionPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16)
+
             if use_vae:
                 vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
-                pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
-
+                pipe = DiffusionPipeline.from_pretrained(model, vae=vae, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16)
+
         if use_img2img:
-            pipe = AutoPipelineForImage2Image.from_pretrained(model, torch_dtype=torch.float16)
+            pipe = AutoPipelineForImage2Image.from_pretrained(model, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16)
+
+            init_image = load_image(url)
 
             if use_vae:
                 vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
-                pipe = AutoPipelineForImage2Image.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
-
-            response = requests.get(url)
-            init_image = Image.open(BytesIO(response.content)).convert("RGB")
-            init_image = init_image.resize((width, height))
+                pipe = AutoPipelineForImage2Image.from_pretrained(model, vae=vae, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16)
+
+        if use_controlnet:
+            controlnet = ControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.float16)
+            pipe = StableDiffusionXLControlNetPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, controlnet=controlnet, torch_dtype=torch.float16)
+
+            image = load_image(controlnet_img)
+
+            image = np.array(image)
+            image = cv2.Canny(image, 250, 255)
+            image = image[:, :, None]
+            image = np.concatenate([image, image, image], axis=2)
+            image = Image.fromarray(image)
+
+            if use_vae:
+                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
+                pipe = StableDiffusionXLControlNetPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, controlnet=controlnet, vae=vae, torch_dtype=torch.float16)
+
+        if use_controlnetinpaint:
+            controlnet = ControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.float16)
+            pipe = StableDiffusionXLControlNetInpaintPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, controlnet=controlnet, torch_dtype=torch.float16)
 
+            image_start = load_image(controlnet_img)
+            image = load_image(controlnet_img)
+            image_mask = load_image(controlnet_inpaint)
+
+            image = np.array(image)
+            image = cv2.Canny(image, 100, 200)
+            image = image[:, :, None]
+            image = np.concatenate([image, image, image], axis=2)
+            image = Image.fromarray(image)
+
+            if use_vae:
+                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
+                pipe = StableDiffusionXLControlNetInpaintPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, controlnet=controlnet, vae=vae, torch_dtype=torch.float16)
+
         if use_lora:
-            pipe.load_lora_weights(lora)
-            pipe.fuse_lora(lora_scale)
-
+            pipe.load_lora_weights(lora, adapter_name="1")
+            pipe.set_adapters("1", adapter_weights=[lora_scale])
+
+        if use_lora2:
+            pipe.load_lora_weights(lora, adapter_name="1")
+            pipe.load_lora_weights(lora2, adapter_name="2")
+            pipe.set_adapters(["1", "2"], adapter_weights=[lora_scale, lora_scale2])
+
         if ENABLE_CPU_OFFLOAD:
             pipe.enable_model_cpu_offload()
 
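Note: the LoRA handling above switches from fuse_lora() to the PEFT-backed adapter API, which is what allows two LoRAs to be blended. A standalone sketch of that pattern (repo ids and weights are placeholders, and a diffusers version with set_adapters is assumed):

    import torch
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
    )
    # register each LoRA under an explicit adapter name...
    pipe.load_lora_weights("nerijs/pixel-art-xl", adapter_name="pixel")
    pipe.load_lora_weights("some-user/another-sdxl-lora", adapter_name="style")  # placeholder repo id
    # ...then blend them with per-adapter weights (the UI's lora_scale / lora_scale2)
    pipe.set_adapters(["pixel", "style"], adapter_weights=[0.7, 0.7])

One caveat: with both checkboxes ticked, the use_lora2 branch loads adapter "1" a second time after the use_lora branch already did, which the adapter API typically rejects; ticking only one of the two boxes avoids this.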
 
@@ -91,9 +143,9 @@ def generate(
 
         if USE_TORCH_COMPILE:
             pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
-
+
     generator = torch.Generator().manual_seed(seed)
-
+
     if not use_negative_prompt:
         negative_prompt = None # type: ignore
     if not use_prompt_2:
@@ -101,20 +153,41 @@ def generate(
     if not use_negative_prompt_2:
         negative_prompt_2 = None # type: ignore
 
-    if not use_img2img:
-        return pipe(
+    if use_controlnetinpaint:
+        image = pipe(
+            prompt=prompt,
+            strength=strength_img2img,
+            controlnet_conditioning_scale=controlnet_conditioning_scale,
+            eta=0.0,
+            mask_image=image_mask,
+            image=image_start,
+            control_image=image,
+            negative_prompt=negative_prompt,
+            width=width,
+            height=height,
+            guidance_scale=guidance_scale_base,
+            num_inference_steps=num_inference_steps_base,
+            generator=generator,
+        ).images[0]
+        return image
+    if use_controlnet:
+        image = pipe(
             prompt=prompt,
+            controlnet_conditioning_scale=controlnet_conditioning_scale,
+            control_guidance_start=control_guidance_start,
+            control_guidance_end=control_guidance_end,
+            image=image,
             negative_prompt=negative_prompt,
             prompt_2=prompt_2,
-            negative_prompt_2=negative_prompt_2,
             width=width,
             height=height,
+            negative_prompt_2=negative_prompt_2,
             guidance_scale=guidance_scale_base,
             num_inference_steps=num_inference_steps_base,
             generator=generator,
-            output_type="pil",
         ).images[0]
-    else:
+        return image
+    elif use_img2img:
         images = pipe(
             prompt=prompt,
             image=init_image,
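Note: the dispatch above runs at most one branch per request: ControlNet-inpaint first, then ControlNet, then img2img, with plain text-to-image as the else fallback added in the next hunk. An end-to-end sketch of the ControlNet path as wired here (URL, prompt, and thresholds are illustrative; a CUDA GPU and the SDXL weights are assumed):

    import cv2
    import numpy as np
    import torch
    from PIL import Image
    from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline
    from diffusers.utils import load_image

    controlnet = ControlNetModel.from_pretrained(
        "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
    )
    pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        controlnet=controlnet,
        torch_dtype=torch.float16,
    ).to("cuda")

    # Canny edge map built the same way as in the commit:
    # single-channel edges stacked into a 3-channel conditioning image
    source = load_image("https://example.com/image.png")  # placeholder URL
    edges = cv2.Canny(np.array(source), 100, 200)
    control_image = Image.fromarray(np.concatenate([edges[:, :, None]] * 3, axis=2))

    result = pipe(
        prompt="a detailed cityscape at dusk",  # illustrative prompt
        image=control_image,
        controlnet_conditioning_scale=1.0,      # weight of the ControlNet residuals
        control_guidance_start=0.0,             # apply from the first denoising step...
        control_guidance_end=1.0,               # ...through the last
        num_inference_steps=25,
    ).images[0]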
@@ -127,14 +200,20 @@ def generate(
             guidance_scale=guidance_scale_base,
             num_inference_steps=num_inference_steps_base,
             generator=generator,
-            output_type="pil",
         ).images[0]
         return images
-
-examples = [
-    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-    "An astronaut riding a green horse",
-]
+    else:
+        return pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            prompt_2=prompt_2,
+            negative_prompt_2=negative_prompt_2,
+            width=width,
+            height=height,
+            guidance_scale=guidance_scale_base,
+            num_inference_steps=num_inference_steps_base,
+            generator=generator,
+        ).images[0]
 
 with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
     gr.HTML(
@@ -144,16 +223,28 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
     with gr.Group():
         model = gr.Text(label='Model', placeholder='e.g. stabilityai/stable-diffusion-xl-base-1.0')
         vaecall = gr.Text(label='VAE', placeholder='e.g. madebyollin/sdxl-vae-fp16-fix')
-        lora = gr.Text(label='LoRA', placeholder='e.g. nerijs/pixel-art-xl')
+        lora = gr.Text(label='LoRA 1', placeholder='e.g. nerijs/pixel-art-xl')
+        lora2 = gr.Text(label='LoRA 2', placeholder='e.g. nerijs/pixel-art-xl')
+        controlnet_model = gr.Text(label='Controlnet', placeholder='e.g diffusers/controlnet-canny-sdxl-1.0')
         lora_scale = gr.Slider(
             info="The closer to 1, the more it will resemble LoRA, but errors may be visible.",
-            label="Lora Scale",
+            label="Lora Scale 1",
+            minimum=0.01,
+            maximum=1,
+            step=0.01,
+            value=0.7,
+        )
+        lora_scale2 = gr.Slider(
+            info="The closer to 1, the more it will resemble LoRA, but errors may be visible.",
+            label="Lora Scale 2",
             minimum=0.01,
             maximum=1,
             step=0.01,
             value=0.7,
         )
-        url = gr.Text(label='URL (Img2Img)', placeholder='e.g https://example.com/image.png')
+        url = gr.Text(label='URL (Img2Img)')
+        controlnet_img = gr.Text(label='URL (Controlnet)', placeholder='e.g https://example.com/image.png')
+        controlnet_inpaint = gr.Text(label='URL (Controlnet - IMG2IMG)', placeholder='e.g https://example.com/image.png')
     with gr.Row():
         prompt = gr.Text(
             placeholder="Input prompt",
@@ -166,9 +257,12 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
     result = gr.Image(label="Result", show_label=False)
     with gr.Accordion("Advanced options", open=False):
         with gr.Row():
+            use_controlnet = gr.Checkbox(label='Use Controlnet', value=False, visible=ENABLE_USE_CONTROLNET)
+            use_controlnetinpaint = gr.Checkbox(label='Use Controlnet Img2Img', value=False, visible=ENABLE_USE_CONTROLNETINPAINT)
             use_img2img = gr.Checkbox(label='Use Img2Img', value=False, visible=ENABLE_USE_IMG2IMG)
             use_vae = gr.Checkbox(label='Use VAE', value=False, visible=ENABLE_USE_VAE)
-            use_lora = gr.Checkbox(label='Use Lora', value=False, visible=ENABLE_USE_LORA)
+            use_lora = gr.Checkbox(label='Use Lora 1', value=False, visible=ENABLE_USE_LORA)
+            use_lora2 = gr.Checkbox(label='Use Lora 2', value=False, visible=ENABLE_USE_LORA2)
             use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
             use_prompt_2 = gr.Checkbox(label="Use prompt 2", value=False)
             use_negative_prompt_2 = gr.Checkbox(label="Use negative prompt 2", value=False)
@@ -233,6 +327,33 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
                 step=1,
                 value=25,
             )
+        with gr.Row():
+            controlnet_conditioning_scale = gr.Slider(
+                info="controlnet_conditioning_scale",
+                label="controlnet_conditioning_scale",
+                minimum=0.01,
+                maximum=2,
+                step=0.01,
+                value=1,
+            )
+        with gr.Row():
+            control_guidance_start = gr.Slider(
+                info="control_guidance_start",
+                label="control_guidance_start",
+                minimum=0.01,
+                maximum=1,
+                step=0.01,
+                value=0,
+            )
+        with gr.Row():
+            control_guidance_end = gr.Slider(
+                info="control_guidance_end",
+                label="control_guidance_end",
+                minimum=0.01,
+                maximum=1,
+                step=0.01,
+                value=1,
+            )
         with gr.Row():
             strength_img2img = gr.Slider(
                 info="Strength for Img2Img",
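Note: the three new sliders mirror the diffusers parameters directly. controlnet_conditioning_scale scales the ControlNet residuals added to the UNet, while control_guidance_start/end give the fraction of denoising steps during which the ControlNet is applied. Roughly (the exact rounding inside diffusers may differ):

    # illustrative values, not the commit's defaults
    num_inference_steps = 25
    control_guidance_start, control_guidance_end = 0.2, 0.8

    first_step = int(num_inference_steps * control_guidance_start)  # 5
    last_step = int(num_inference_steps * control_guidance_end)     # 20
    print(f"ControlNet active on steps {first_step}..{last_step} of {num_inference_steps}")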
@@ -243,14 +364,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
                 value=0.7,
             )
 
-    gr.Examples(
-        examples=examples,
-        inputs=prompt,
-        outputs=result,
-        fn=generate,
-        cache_examples=CACHE_EXAMPLES,
-    )
-
     use_negative_prompt.change(
         fn=lambda x: gr.update(visible=x),
         inputs=use_negative_prompt,
@@ -286,6 +399,13 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
         queue=False,
         api_name=False,
     )
+    use_lora2.change(
+        fn=lambda x: gr.update(visible=x),
+        inputs=use_lora2,
+        outputs=lora2,
+        queue=False,
+        api_name=False,
+    )
     use_img2img.change(
         fn=lambda x: gr.update(visible=x),
         inputs=use_img2img,
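Note: the use_lora2.change handler above follows the same show/hide pattern used for every optional field: the checkbox's .change event returns a gr.update(visible=...) for its companion textbox. A self-contained sketch of the pattern (component names here are illustrative):

    import gradio as gr

    with gr.Blocks() as demo:
        use_extra = gr.Checkbox(label="Use extra input", value=False)
        extra = gr.Text(label="Extra input", visible=False)
        # reveal the textbox only while the checkbox is ticked
        use_extra.change(
            fn=lambda x: gr.update(visible=x),
            inputs=use_extra,
            outputs=extra,
            queue=False,
        )

    demo.launch()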
@@ -293,6 +413,20 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
         queue=False,
         api_name=False,
     )
+    use_controlnet.change(
+        fn=lambda x: gr.update(visible=x),
+        inputs=use_controlnet,
+        outputs=controlnet_img,
+        queue=False,
+        api_name=False,
+    )
+    use_controlnetinpaint.change(
+        fn=lambda x: gr.update(visible=x),
+        inputs=use_controlnetinpaint,
+        outputs=controlnet_inpaint,
+        queue=False,
+        api_name=False,
+    )
 
     gr.on(
         triggers=[
@@ -322,19 +456,30 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
             height,
             guidance_scale_base,
             num_inference_steps_base,
+            controlnet_conditioning_scale,
+            control_guidance_start,
+            control_guidance_end,
             strength_img2img,
             use_vae,
             use_lora,
+            use_lora2,
             model,
             vaecall,
             lora,
+            lora2,
+            controlnet_model,
             lora_scale,
+            lora_scale2,
             use_img2img,
+            use_controlnet,
+            use_controlnetinpaint,
             url,
+            controlnet_img,
+            controlnet_inpaint,
         ],
         outputs=result,
         api_name="run",
     )
 
 if __name__ == "__main__":
-    demo.queue(max_size=20).launch()
+    demo.queue(max_size=20, default_concurrency_limit=5).launch()
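Note: the final change raises throughput as well as queue depth: max_size=20 still caps how many requests may wait, and default_concurrency_limit=5 (a Gradio 4 queue parameter) lets up to five generate calls run concurrently. A minimal sketch:

    import gradio as gr

    with gr.Blocks() as demo:
        gr.Markdown("queue demo")  # placeholder UI

    # queue at most 20 requests; process up to 5 at a time
    demo.queue(max_size=20, default_concurrency_limit=5).launch()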