amazonaws-la committed
Commit b6711e8
Parent: e24684d

Update app.py

Files changed (1)
app.py +14 -0
app.py CHANGED
@@ -22,6 +22,7 @@ MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1824"))
 USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
 ENABLE_REFINER = os.getenv("ENABLE_REFINER", "1") == "1"
+ENABLE_USE_LORA = os.getenv("ENABLE_USE_LORA", "1") == "1"
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
@@ -47,6 +48,7 @@ def generate(
     guidance_scale_refiner: float = 5.0,
     num_inference_steps_base: int = 25,
     num_inference_steps_refiner: int = 25,
+    use_lora: bool = False,
     apply_refiner: bool = False,
     model = 'stabilityai/stable-diffusion-xl-base-1.0',
     vaecall = 'madebyollin/sdxl-vae-fp16-fix',
@@ -54,6 +56,9 @@ def generate(
 ) -> PIL.Image.Image:
     if torch.cuda.is_available():
         pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
+
+        if use_lora:
+            pipe.load_lora_weights(lora)
 
         if ENABLE_CPU_OFFLOAD:
             pipe.enable_model_cpu_offload()
@@ -185,6 +190,7 @@ with gr.Blocks(css="style.css") as demo:
                     step=32,
                     value=1024,
                 )
+            use_lora = gr.Checkbox(label='Use Lora', value=False, visible=ENABLE_USE_LORA)
             apply_refiner = gr.Checkbox(label="Apply refiner", value=False, visible=ENABLE_REFINER)
             with gr.Row():
                 guidance_scale_base = gr.Slider(
@@ -246,6 +252,13 @@ with gr.Blocks(css="style.css") as demo:
         queue=False,
         api_name=False,
     )
+    use_lora.change(
+        fn=lambda x: gr.update(visible=x),
+        inputs=use_lora,
+        outputs=lora,
+        queue=False,
+        api_name=False,
+    )
     apply_refiner.change(
         fn=lambda x: gr.update(visible=x),
         inputs=apply_refiner,
@@ -284,6 +297,7 @@ with gr.Blocks(css="style.css") as demo:
             guidance_scale_refiner,
             num_inference_steps_base,
             num_inference_steps_refiner,
+            use_lora,
             apply_refiner,
             model,
             vaecall,