habulaj committed
Commit 57218c8 · verified · 1 Parent(s): a896fab

Update app.py

Files changed (1)
  1. app.py +254 -88
app.py CHANGED
@@ -1,40 +1,37 @@
-#!/usr/bin/env pythona

 from __future__ import annotations

 import requests
 import os
 import random
-import random
-import string

 import gradio as gr
 import numpy as np
 import spaces
 import torch
-import gc
 import cv2
 from PIL import Image
-from accelerate import init_empty_weights
 from io import BytesIO
 from diffusers.utils import load_image
-from diffusers import StableDiffusionXLControlNetPipeline, StableDiffusionXLControlNetInpaintPipeline, ControlNetModel, AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image, AutoPipelineForInpainting, UNet2DConditionModel
-from controlnet_aux import HEDdetector
-from compel import Compel, ReturnedEmbeddingsType
-import threading

 DESCRIPTION = "# Run any LoRA or SD Model"
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>⚠️ This space is running on the CPU. This demo doesn't work on CPU 😞! Run on a GPU by duplicating this space or test our website for free and unlimited by <a href='https://squaadai.com'>clicking here</a>, which provides these and more options.</p>"

 MAX_SEED = np.iinfo(np.int32).max
-CUDA_LAUNCH_BLOCKING=1
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1824"))
 USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
 ENABLE_USE_LORA = os.getenv("ENABLE_USE_LORA", "1") == "1"
 ENABLE_USE_LORA2 = os.getenv("ENABLE_USE_LORA2", "1") == "1"
 ENABLE_USE_IMG2IMG = os.getenv("ENABLE_USE_IMG2IMG", "1") == "1"

 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@@ -43,112 +40,181 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
         seed = random.randint(0, MAX_SEED)
     return seed

-cached_pipelines = {}  # Dictionary that stores the loaded pipelines
-cached_loras = {}
-# Create a Lock object
-pipeline_lock = threading.Lock()

 @spaces.GPU
 def generate(
-    prompt: str = "",
     negative_prompt: str = "",
     use_negative_prompt: bool = False,
     seed: int = 0,
     width: int = 1024,
     height: int = 1024,
     guidance_scale_base: float = 5.0,
     num_inference_steps_base: int = 25,
     strength_img2img: float = 0.7,
     use_lora: bool = False,
     use_lora2: bool = False,
     model = 'stabilityai/stable-diffusion-xl-base-1.0',
     lora = '',
     lora2 = '',
     lora_scale: float = 0.7,
     lora_scale2: float = 0.7,
     use_img2img: bool = False,
     url = '',
-):
-    global cached_pipelines, cached_loras
-
     if torch.cuda.is_available():
-        # Build the cache key from the model and the pipeline type
-        pipeline_key = (model, use_img2img)

-        if pipeline_key not in cached_pipelines:
-            if not use_img2img:
-                cached_pipelines[pipeline_key] = DiffusionPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16, low_cpu_mem_usage=True)
-            elif use_img2img:
-                cached_pipelines[pipeline_key] = AutoPipelineForImage2Image.from_pretrained(model, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16, low_cpu_mem_usage=True)
-
-        pipe = cached_pipelines[pipeline_key]  # Use the pipeline already cached in memory
-
         if use_img2img:
             init_image = load_image(url)
-
-        if use_lora:
-            lora_key = (lora, lora_scale)
-            if lora_key not in cached_loras:
-                adapter_name = ''.join(random.choice(string.ascii_letters) for _ in range(5))
-                pipe.load_lora_weights(lora, adapter_name=adapter_name)
-                cached_loras[lora_key] = adapter_name
-            else:
-                adapter_name = cached_loras[lora_key]
-            pipe.set_adapters(adapter_name, adapter_weights=[lora_scale])

-        if use_lora2:
-            lora_key1 = (lora, lora_scale)
-            lora_key2 = (lora2, lora_scale2)
-            if lora_key1 not in cached_loras:
-                adapter_name1 = ''.join(random.choice(string.ascii_letters) for _ in range(5))
-                pipe.load_lora_weights(lora, adapter_name=adapter_name1)
-                cached_loras[lora_key1] = adapter_name1
-            else:
-                adapter_name1 = cached_loras[lora_key1]
-            if lora_key2 not in cached_loras:
-                adapter_name2 = ''.join(random.choice(string.ascii_letters) for _ in range(5))
-                pipe.load_lora_weights(lora2, adapter_name=adapter_name2)
-                cached_loras[lora_key2] = adapter_name2
-            else:
-                adapter_name2 = cached_loras[lora_key2]
-            pipe.set_adapters([adapter_name1, adapter_name2], adapter_weights=[lora_scale, lora_scale2])

-        pipe.enable_model_cpu_offload()
-        generator = torch.Generator().manual_seed(seed)

-        if not use_negative_prompt:
-            negative_prompt = None  # type: ignore

-        with pipeline_lock:
-            if use_img2img:
-                result = pipe(
-                    prompt=prompt,
-                    negative_prompt=negative_prompt,
-                    image=init_image,
-                    strength=strength_img2img,
-                    width=width,
-                    height=height,
-                    guidance_scale=guidance_scale_base,
-                    num_inference_steps=num_inference_steps_base,
-                    generator=generator,
-                ).images[0]
-            else:
-                result = pipe(
-                    prompt=prompt,
-                    negative_prompt=negative_prompt,
-                    width=width,
-                    height=height,
-                    guidance_scale=guidance_scale_base,
-                    num_inference_steps=num_inference_steps_base,
-                    generator=generator,
-                ).images[0]

-        # Free memory
-        del pipe
-        torch.cuda.empty_cache()
-        gc.collect()
-        return result

 with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
     gr.HTML(
         "<p><center>📙 For any additional support, join our <a href='https://discord.gg/JprjXpjt9K'>Discord</a></center></p>"
@@ -156,8 +222,10 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
     gr.Markdown(DESCRIPTION, elem_id="description")
     with gr.Group():
         model = gr.Text(label='Model', placeholder='e.g. stabilityai/stable-diffusion-xl-base-1.0')
         lora = gr.Text(label='LoRA 1', placeholder='e.g. nerijs/pixel-art-xl')
         lora2 = gr.Text(label='LoRA 2', placeholder='e.g. nerijs/pixel-art-xl')
         lora_scale = gr.Slider(
             info="The closer to 1, the more it will resemble LoRA, but errors may be visible.",
             label="Lora Scale 1",
@@ -174,7 +242,9 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
             step=0.01,
             value=0.7,
         )
-        url = gr.Text(label='URL (Img2Img)')
     with gr.Row():
         prompt = gr.Text(
             placeholder="Input prompt",
@@ -187,16 +257,34 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
     result = gr.Image(label="Result", show_label=False)
     with gr.Accordion("Advanced options", open=False):
         with gr.Row():
             use_img2img = gr.Checkbox(label='Use Img2Img', value=False, visible=ENABLE_USE_IMG2IMG)
             use_lora = gr.Checkbox(label='Use Lora 1', value=False, visible=ENABLE_USE_LORA)
             use_lora2 = gr.Checkbox(label='Use Lora 2', value=False, visible=ENABLE_USE_LORA2)
             use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
         negative_prompt = gr.Text(
             placeholder="Input Negative Prompt",
             label="Negative prompt",
             max_lines=1,
             visible=False,
         )
         seed = gr.Slider(
             label="Seed",
             minimum=0,
@@ -239,6 +327,33 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
             step=1,
             value=25,
         )
         with gr.Row():
             strength_img2img = gr.Slider(
                 info="Strength for Img2Img",
@@ -256,6 +371,27 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
         queue=False,
         api_name=False,
     )
     use_lora.change(
         fn=lambda x: gr.update(visible=x),
         inputs=use_lora,
@@ -277,11 +413,27 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
         queue=False,
         api_name=False,
     )

    gr.on(
        triggers=[
            prompt.submit,
            negative_prompt.submit,
            run_button.click,
        ],
        fn=randomize_seed_fn,
@@ -294,26 +446,40 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
        inputs=[
            prompt,
            negative_prompt,
            use_negative_prompt,
            seed,
            width,
            height,
            guidance_scale_base,
            num_inference_steps_base,
            strength_img2img,
            use_lora,
            use_lora2,
            model,
            lora,
            lora2,
            lora_scale,
            lora_scale2,
            use_img2img,
            url,
        ],
        outputs=result,
        api_name="run",
    )

 if __name__ == "__main__":
-    demo.queue(max_size=4, default_concurrency_limit=4).launch()
+#!/usr/bin/env python

 from __future__ import annotations

 import requests
 import os
 import random

 import gradio as gr
 import numpy as np
 import spaces
 import torch
 import cv2
+import xformers
+import triton
 from PIL import Image
 from io import BytesIO
 from diffusers.utils import load_image
+from diffusers import StableDiffusionXLControlNetPipeline, StableDiffusionXLControlNetInpaintPipeline, ControlNetModel, AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image, AutoPipelineForInpainting, EulerDiscreteScheduler, DPMSolverMultistepScheduler

 DESCRIPTION = "# Run any LoRA or SD Model"
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>⚠️ This space is running on the CPU. This demo doesn't work on CPU 😞! Run on a GPU by duplicating this space or test our website for free and unlimited by <a href='https://squaadai.com'>clicking here</a>, which provides these and more options.</p>"

 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1824"))
 USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
 ENABLE_USE_LORA = os.getenv("ENABLE_USE_LORA", "1") == "1"
 ENABLE_USE_LORA2 = os.getenv("ENABLE_USE_LORA2", "1") == "1"
+ENABLE_USE_VAE = os.getenv("ENABLE_USE_VAE", "1") == "1"
 ENABLE_USE_IMG2IMG = os.getenv("ENABLE_USE_IMG2IMG", "1") == "1"
+ENABLE_USE_CONTROLNET = os.getenv("ENABLE_USE_CONTROLNET", "1") == "1"
+ENABLE_USE_CONTROLNETIMG2IMG = os.getenv("ENABLE_USE_CONTROLNET", "1") == "1"

 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

         seed = random.randint(0, MAX_SEED)
     return seed

 @spaces.GPU
 def generate(
+    prompt: str,
     negative_prompt: str = "",
+    prompt_2: str = "",
+    negative_prompt_2: str = "",
     use_negative_prompt: bool = False,
+    use_prompt_2: bool = False,
+    use_negative_prompt_2: bool = False,
     seed: int = 0,
     width: int = 1024,
     height: int = 1024,
     guidance_scale_base: float = 5.0,
     num_inference_steps_base: int = 25,
+    controlnet_conditioning_scale: float = 1,
+    control_guidance_start: float = 0,
+    control_guidance_end: float = 1,
     strength_img2img: float = 0.7,
+    use_vae: bool = False,
     use_lora: bool = False,
     use_lora2: bool = False,
     model = 'stabilityai/stable-diffusion-xl-base-1.0',
+    vaecall = 'madebyollin/sdxl-vae-fp16-fix',
     lora = '',
     lora2 = '',
+    controlnet_model = 'diffusers/controlnet-canny-sdxl-1.0',
     lora_scale: float = 0.7,
     lora_scale2: float = 0.7,
     use_img2img: bool = False,
+    use_controlnet: bool = False,
+    use_controlnetimg2img: bool = False,
     url = '',
+    controlnet_img = '',
+    controlnet_img2img = '',
+):
     if torch.cuda.is_available():

+        if not use_img2img:
+            scheduler = DPMSolverMultistepScheduler.from_pretrained(model, subfolder="scheduler")
+            pipe = DiffusionPipeline.from_pretrained(model, scheduler=scheduler, torch_dtype=torch.float16)
+
+            pipe.to(device)
+
+            if use_vae:
+                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
+                pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
+
+                pipe.to(device)
+
         if use_img2img:
+            pipe = AutoPipelineForImage2Image.from_pretrained(model, torch_dtype=torch.float16)
+
             init_image = load_image(url)
+
+            if use_vae:
+                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
+                pipe = AutoPipelineForImage2Image.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
+
+        if use_controlnet:
+            controlnet = ControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.float16)
+            pipe = StableDiffusionXLControlNetPipeline.from_pretrained(model, controlnet=controlnet, torch_dtype=torch.float16)
+
+            image = load_image(controlnet_img)
+
+            image = np.array(image)
+            image = cv2.Canny(image, 100, 200)
+            image = image[:, :, None]
+            image = np.concatenate([image, image, image], axis=2)
+            image = Image.fromarray(image)
+
+            if use_vae:
+                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
+                pipe = StableDiffusionXLControlNetPipeline.from_pretrained(model, controlnet=controlnet, vae=vae, torch_dtype=torch.float16)
+
+        if use_controlnetimg2img:
+            controlnet = ControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.float16)
+            pipe = StableDiffusionXLControlNetInpaintPipeline.from_pretrained(model, controlnet=controlnet, torch_dtype=torch.float16)
+
+            image_start = load_image(controlnet_img)
+            image = load_image(controlnet_img)
+            image_mask = load_image(controlnet_img2img)
+
+            image = np.array(image)
+            image = cv2.Canny(image, 100, 200)
+            image = image[:, :, None]
+            image = np.concatenate([image, image, image], axis=2)
+            image = Image.fromarray(image)
+
+            if use_vae:
+                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
+                pipe = StableDiffusionXLControlNetInpaintPipeline.from_pretrained(model, controlnet=controlnet, vae=vae, torch_dtype=torch.float16)
+
+        if use_lora:
+            pipe.load_lora_weights(lora)
+            pipe.fuse_lora(lora_scale)
+
+        if use_lora2:
+            pipe.load_lora_weights(lora, adapter_name="1")
+            pipe.load_lora_weights(lora2, adapter_name="2")
+            pipe.set_adapters(["1", "2"], adapter_weights=[lora_scale, lora_scale2])
+
+        generator = torch.Generator().manual_seed(seed)

+        if not use_negative_prompt:
+            negative_prompt = None  # type: ignore
+        if not use_prompt_2:
+            prompt_2 = None  # type: ignore
+        if not use_negative_prompt_2:
+            negative_prompt_2 = None  # type: ignore

+        if use_controlnetimg2img:
+            image = pipe(
+                prompt=prompt,
+                strength=strength_img2img,
+                controlnet_conditioning_scale=controlnet_conditioning_scale,
+                eta=0.0,
+                mask_image=image_mask,
+                image=image_start,
+                control_image=image,
+                negative_prompt=negative_prompt,
+                width=width,
+                height=height,
+                guidance_scale=guidance_scale_base,
+                num_inference_steps=num_inference_steps_base,
+                generator=generator,
+            ).images[0]
+            return image
+        if use_controlnet:
+            image = pipe(
+                prompt=prompt,
+                controlnet_conditioning_scale=controlnet_conditioning_scale,
+                control_guidance_start=control_guidance_start,
+                control_guidance_end=control_guidance_end,
+                image=image,
+                negative_prompt=negative_prompt,
+                prompt_2=prompt_2,
+                width=width,
+                height=height,
+                negative_prompt_2=negative_prompt_2,
+                guidance_scale=guidance_scale_base,
+                num_inference_steps=num_inference_steps_base,
+                generator=generator,
+            ).images[0]
+            return image
+        elif use_img2img:
+            images = pipe(
+                prompt=prompt,
+                image=init_image,
+                strength=strength_img2img,
+                negative_prompt=negative_prompt,
+                prompt_2=prompt_2,
+                negative_prompt_2=negative_prompt_2,
+                width=width,
+                height=height,
+                guidance_scale=guidance_scale_base,
+                num_inference_steps=num_inference_steps_base,
+                generator=generator,
+                output_type="pil",
+            ).images[0]
+            return images
+        else:
+            return pipe(
+                prompt=prompt,
+                negative_prompt=negative_prompt,
+                prompt_2=prompt_2,
+                negative_prompt_2=negative_prompt_2,
+                width=width,
+                height=height,
+                guidance_scale=guidance_scale_base,
+                num_inference_steps=num_inference_steps_base,
+                generator=generator,
+                output_type="pil",
+            ).images[0]
+
 with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
     gr.HTML(
         "<p><center>📙 For any additional support, join our <a href='https://discord.gg/JprjXpjt9K'>Discord</a></center></p>"

     gr.Markdown(DESCRIPTION, elem_id="description")
     with gr.Group():
         model = gr.Text(label='Model', placeholder='e.g. stabilityai/stable-diffusion-xl-base-1.0')
+        vaecall = gr.Text(label='VAE', placeholder='e.g. madebyollin/sdxl-vae-fp16-fix')
         lora = gr.Text(label='LoRA 1', placeholder='e.g. nerijs/pixel-art-xl')
         lora2 = gr.Text(label='LoRA 2', placeholder='e.g. nerijs/pixel-art-xl')
+        controlnet_model = gr.Text(label='Controlnet', placeholder='e.g diffusers/controlnet-canny-sdxl-1.0')
         lora_scale = gr.Slider(
             info="The closer to 1, the more it will resemble LoRA, but errors may be visible.",
             label="Lora Scale 1",

             step=0.01,
             value=0.7,
         )
+        url = gr.Text(label='URL (Img2Img)', placeholder='e.g https://example.com/image.png')
+        controlnet_img = gr.Text(label='URL (Controlnet)', placeholder='e.g https://example.com/image.png')
+        controlnet_img2img = gr.Text(label='URL (Controlnet - IMG2IMG)', placeholder='e.g https://example.com/image.png')
     with gr.Row():
         prompt = gr.Text(
             placeholder="Input prompt",

     result = gr.Image(label="Result", show_label=False)
     with gr.Accordion("Advanced options", open=False):
         with gr.Row():
+            use_controlnet = gr.Checkbox(label='Use Controlnet', value=False, visible=ENABLE_USE_CONTROLNET)
+            use_controlnetimg2img = gr.Checkbox(label='Use Controlnet Img2Img', value=False, visible=ENABLE_USE_CONTROLNETIMG2IMG)
             use_img2img = gr.Checkbox(label='Use Img2Img', value=False, visible=ENABLE_USE_IMG2IMG)
+            use_vae = gr.Checkbox(label='Use VAE', value=False, visible=ENABLE_USE_VAE)
             use_lora = gr.Checkbox(label='Use Lora 1', value=False, visible=ENABLE_USE_LORA)
             use_lora2 = gr.Checkbox(label='Use Lora 2', value=False, visible=ENABLE_USE_LORA2)
             use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
+            use_prompt_2 = gr.Checkbox(label="Use prompt 2", value=False)
+            use_negative_prompt_2 = gr.Checkbox(label="Use negative prompt 2", value=False)
         negative_prompt = gr.Text(
             placeholder="Input Negative Prompt",
             label="Negative prompt",
             max_lines=1,
             visible=False,
         )
+        prompt_2 = gr.Text(
+            placeholder="Input Prompt 2",
+            label="Prompt 2",
+            max_lines=1,
+            visible=False,
+        )
+        negative_prompt_2 = gr.Text(
+            placeholder="Input Negative Prompt 2",
+            label="Negative prompt 2",
+            max_lines=1,
+            visible=False,
+        )
+
         seed = gr.Slider(
             label="Seed",
             minimum=0,

             step=1,
             value=25,
         )
+        with gr.Row():
+            controlnet_conditioning_scale = gr.Slider(
+                info="controlnet_conditioning_scale",
+                label="controlnet_conditioning_scale",
+                minimum=0.01,
+                maximum=2,
+                step=0.01,
+                value=1,
+            )
+        with gr.Row():
+            control_guidance_start = gr.Slider(
+                info="control_guidance_start",
+                label="control_guidance_start",
+                minimum=0.01,
+                maximum=1,
+                step=0.01,
+                value=0,
+            )
+        with gr.Row():
+            control_guidance_end = gr.Slider(
+                info="control_guidance_end",
+                label="control_guidance_end",
+                minimum=0.01,
+                maximum=1,
+                step=0.01,
+                value=1,
+            )
         with gr.Row():
             strength_img2img = gr.Slider(
                 info="Strength for Img2Img",

         queue=False,
         api_name=False,
     )
+    use_prompt_2.change(
+        fn=lambda x: gr.update(visible=x),
+        inputs=use_prompt_2,
+        outputs=prompt_2,
+        queue=False,
+        api_name=False,
+    )
+    use_negative_prompt_2.change(
+        fn=lambda x: gr.update(visible=x),
+        inputs=use_negative_prompt_2,
+        outputs=negative_prompt_2,
+        queue=False,
+        api_name=False,
+    )
+    use_vae.change(
+        fn=lambda x: gr.update(visible=x),
+        inputs=use_vae,
+        outputs=vaecall,
+        queue=False,
+        api_name=False,
+    )
     use_lora.change(
         fn=lambda x: gr.update(visible=x),
         inputs=use_lora,

         queue=False,
         api_name=False,
     )
+    use_controlnet.change(
+        fn=lambda x: gr.update(visible=x),
+        inputs=use_controlnet,
+        outputs=controlnet_img,
+        queue=False,
+        api_name=False,
+    )
+    use_controlnetimg2img.change(
+        fn=lambda x: gr.update(visible=x),
+        inputs=use_controlnetimg2img,
+        outputs=controlnet_img2img,
+        queue=False,
+        api_name=False,
+    )

     gr.on(
         triggers=[
             prompt.submit,
             negative_prompt.submit,
+            prompt_2.submit,
+            negative_prompt_2.submit,
             run_button.click,
         ],
         fn=randomize_seed_fn,

  inputs=[
447
  prompt,
448
  negative_prompt,
449
+ prompt_2,
450
+ negative_prompt_2,
451
  use_negative_prompt,
452
+ use_prompt_2,
453
+ use_negative_prompt_2,
454
  seed,
455
  width,
456
  height,
457
  guidance_scale_base,
458
  num_inference_steps_base,
459
+ controlnet_conditioning_scale,
460
+ control_guidance_start,
461
+ control_guidance_end,
462
  strength_img2img,
463
+ use_vae,
464
  use_lora,
465
  use_lora2,
466
  model,
467
+ vaecall,
468
  lora,
469
  lora2,
470
+ controlnet_model,
471
  lora_scale,
472
  lora_scale2,
473
  use_img2img,
474
+ use_controlnet,
475
+ use_controlnetimg2img,
476
  url,
477
+ controlnet_img,
478
+ controlnet_img2img,
479
  ],
480
  outputs=result,
481
  api_name="run",
482
  )
483
 
484
  if __name__ == "__main__":
485
+ demo.queue(max_size=20).launch()
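
For quick manual testing of the Canny ControlNet path this commit adds to generate(), a minimal standalone sketch is shown below (not part of the commit). It mirrors the preprocessing and pipeline call from the new code; the image URL and prompt are placeholders, and a CUDA GPU with fp16 support is assumed.

# Hedged sketch: exercises the new Canny-based ControlNet branch outside the Gradio app.
import cv2
import numpy as np
import torch
from PIL import Image
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained(
    "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

# Same Canny preprocessing as generate(): single-channel edge map replicated to 3 channels.
image = np.array(load_image("https://example.com/image.png"))  # placeholder URL
edges = cv2.Canny(image, 100, 200)[:, :, None]
control_image = Image.fromarray(np.concatenate([edges, edges, edges], axis=2))

result = pipe(
    prompt="a placeholder prompt",
    image=control_image,
    controlnet_conditioning_scale=1.0,
    num_inference_steps=25,
    generator=torch.Generator().manual_seed(0),
).images[0]
result.save("controlnet_canny_test.png")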