forplaytvplus committed on
Commit
ddb9a25
1 Parent(s): ccb155c

Update app.py

Files changed (1):
  app.py +15 -15
app.py CHANGED
@@ -77,24 +77,24 @@ def generate(
     if torch.cuda.is_available():

         if not use_img2img:
-            pipe = DiffusionPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16)
+            pipe = DiffusionPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)

             if use_vae:
-                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
-                pipe = DiffusionPipeline.from_pretrained(model, vae=vae, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16)
+                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
+                pipe = DiffusionPipeline.from_pretrained(model, vae=vae, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)

         if use_img2img:
-            pipe = AutoPipelineForImage2Image.from_pretrained(model, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16)
+            pipe = AutoPipelineForImage2Image.from_pretrained(model, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)

             init_image = load_image(url)

             if use_vae:
-                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
-                pipe = AutoPipelineForImage2Image.from_pretrained(model, vae=vae, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16)
+                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
+                pipe = AutoPipelineForImage2Image.from_pretrained(model, vae=vae, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)

         if use_controlnet:
-            controlnet = ControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.float16)
-            pipe = StableDiffusionXLControlNetPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, controlnet=controlnet, torch_dtype=torch.float16)
+            controlnet = ControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
+            pipe = StableDiffusionXLControlNetPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, controlnet=controlnet, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)

             image = load_image(controlnet_img)

@@ -105,12 +105,12 @@ def generate(
             image = Image.fromarray(image)

             if use_vae:
-                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
-                pipe = StableDiffusionXLControlNetPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, controlnet=controlnet, vae=vae, torch_dtype=torch.float16)
+                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
+                pipe = StableDiffusionXLControlNetPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, controlnet=controlnet, vae=vae, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)

         if use_controlnetinpaint:
-            controlnet = ControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.float16)
-            pipe = StableDiffusionXLControlNetInpaintPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, controlnet=controlnet, torch_dtype=torch.float16)
+            controlnet = ControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
+            pipe = StableDiffusionXLControlNetInpaintPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, controlnet=controlnet, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)

             image_start = load_image(controlnet_img)
             image = load_image(controlnet_img)
@@ -123,8 +123,8 @@ def generate(
             image = Image.fromarray(image)

             if use_vae:
-                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
-                pipe = StableDiffusionXLControlNetInpaintPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, controlnet=controlnet, vae=vae, torch_dtype=torch.float16)
+                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
+                pipe = StableDiffusionXLControlNetInpaintPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, controlnet=controlnet, vae=vae, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)

         if use_lora:
             pipe.load_lora_weights(lora, adapter_name="1")
@@ -474,4 +474,4 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
     )

 if __name__ == "__main__":
-    demo.queue(max_size=20, default_concurrency_limit=5).launch()
+    demo.queue(max_size=20, default_concurrency_limit=2).launch()
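Every changed line in the first three hunks follows the same pattern: each from_pretrained call gains device_map="balanced" and low_cpu_mem_usage=True. A minimal sketch of that loading pattern, assuming diffusers with accelerate installed and a CUDA machine; the SDXL model id below is a placeholder, not the user-supplied model the app actually loads:

import torch
from diffusers import DiffusionPipeline

# Sketch of the loading pattern this commit applies to every pipeline.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # placeholder model id (assumption)
    torch_dtype=torch.float16,
    device_map="balanced",    # let accelerate spread pipeline components across visible GPUs
    low_cpu_mem_usage=True,   # load weights without staging an extra full copy in CPU RAM
)
image = pipe("an astronaut riding a horse", num_inference_steps=30).images[0]
image.save("out.png")

The "balanced" strategy places whole components (UNet, VAE, text encoders) on different devices rather than splitting individual layers, so it mainly pays off on multi-GPU machines.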
 
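The final hunk also lowers the Gradio queue's default_concurrency_limit from 5 to 2, presumably to keep concurrent fp16 pipelines within GPU memory. A short sketch of the queue wiring, assuming Gradio 4.x semantics (default_concurrency_limit caps how many event handlers run at once; max_size caps how many requests may wait); the Blocks body is a stand-in for the real UI:

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("placeholder UI")  # stand-in for the app's real interface (assumption)

if __name__ == "__main__":
    # At most 20 requests wait in the queue, and at most 2 generations run
    # concurrently (down from 5 in this commit).
    demo.queue(max_size=20, default_concurrency_limit=2).launch()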