adamelliotfields committed on
Commit
14665b0
1 Parent(s): 5f39fb6

Remove tiny VAE

Browse files
Files changed (6) hide show
  1. DOCS.md +0 -4
  2. README.md +2 -3
  3. app.py +0 -6
  4. lib/config.py +0 -1
  5. lib/inference.py +0 -2
  6. lib/loader.py +19 -47
DOCS.md CHANGED
@@ -95,7 +95,3 @@ Enable `Use negative TI` to append [`fast_negative`](https://civitai.com/models/
95
  #### Clip Skip
96
 
97
  When enabled, the last CLIP layer is skipped. Can sometimes improve image quality.
98
-
99
- #### Tiny VAE
100
-
101
- Enable [madebyollin/taesd](https://github.com/madebyollin/taesd) for near-instant latent decoding with a minor loss in detail. Useful for development.
 
95
  #### Clip Skip
96
 
97
  When enabled, the last CLIP layer is skipped. Can sometimes improve image quality.
 
 
 
 
README.md CHANGED
@@ -60,14 +60,13 @@ preload_from_hub: # up to 10
60
  # diffusion
61
 
62
  Gradio app for Stable Diffusion 1.5 featuring:
63
- * txt2img and img2img pipelines with ControlNet and IP-Adapter
64
- * Canny edge detection (more preprocessors coming soon)
65
  * Compel prompt weighting
66
  * Hand-written style templates
67
  * Multiple samplers with Karras scheduling
68
  * DeepCache, FreeU, and Clip Skip available
69
  * Real-ESRGAN upscaling
70
- * Optional tiny autoencoder
71
 
72
  ## Usage
73
 
 
60
  # diffusion
61
 
62
  Gradio app for Stable Diffusion 1.5 featuring:
63
+ * txt2img and img2img pipelines with IP-Adapter
64
+ * ControlNet with Canny edge detection (more preprocessors coming soon)
65
  * Compel prompt weighting
66
  * Hand-written style templates
67
  * Multiple samplers with Karras scheduling
68
  * DeepCache, FreeU, and Clip Skip available
69
  * Real-ESRGAN upscaling
 
70
 
71
  ## Usage
72
 
app.py CHANGED
@@ -302,11 +302,6 @@ with gr.Blocks(
302
  label="Use negative TI",
303
  value=False,
304
  )
305
- use_taesd = gr.Checkbox(
306
- elem_classes=["checkbox"],
307
- label="Tiny VAE",
308
- value=False,
309
- )
310
  use_freeu = gr.Checkbox(
311
  elem_classes=["checkbox"],
312
  label="FreeU",
@@ -456,7 +451,6 @@ with gr.Blocks(
456
  scale,
457
  num_images,
458
  use_karras,
459
- use_taesd,
460
  use_freeu,
461
  use_clip_skip,
462
  use_ip_face,
 
302
  label="Use negative TI",
303
  value=False,
304
  )
 
 
 
 
 
305
  use_freeu = gr.Checkbox(
306
  elem_classes=["checkbox"],
307
  label="FreeU",
 
451
  scale,
452
  num_images,
453
  use_karras,
 
454
  use_freeu,
455
  use_clip_skip,
456
  use_ip_face,
lib/config.py CHANGED
@@ -68,7 +68,6 @@ Config = SimpleNamespace(
68
  "Linaqruf/anything-v3-1": ["anything-v3-2.safetensors"],
69
  "lllyasviel/control_v11p_sd15_canny": ["diffusion_pytorch_model.fp16.safetensors"],
70
  "Lykon/dreamshaper-8": [*sd_files],
71
- "madebyollin/taesd": ["diffusion_pytorch_model.safetensors"],
72
  "prompthero/openjourney-v4": ["openjourney-v4.ckpt"],
73
  "SG161222/Realistic_Vision_V5.1_noVAE": ["Realistic_Vision_V5.1_fp16-no-ema.safetensors"],
74
  "XpucT/Deliberate": ["Deliberate_v6.safetensors"],
 
68
  "Linaqruf/anything-v3-1": ["anything-v3-2.safetensors"],
69
  "lllyasviel/control_v11p_sd15_canny": ["diffusion_pytorch_model.fp16.safetensors"],
70
  "Lykon/dreamshaper-8": [*sd_files],
 
71
  "prompthero/openjourney-v4": ["openjourney-v4.ckpt"],
72
  "SG161222/Realistic_Vision_V5.1_noVAE": ["Realistic_Vision_V5.1_fp16-no-ema.safetensors"],
73
  "XpucT/Deliberate": ["Deliberate_v6.safetensors"],
lib/inference.py CHANGED
@@ -80,7 +80,6 @@ def generate(
80
  scale=1,
81
  num_images=1,
82
  karras=False,
83
- taesd=False,
84
  freeu=False,
85
  clip_skip=False,
86
  ip_face=False,
@@ -144,7 +143,6 @@ def generate(
144
  deepcache,
145
  scale,
146
  karras,
147
- taesd,
148
  freeu,
149
  progress,
150
  )
 
80
  scale=1,
81
  num_images=1,
82
  karras=False,
 
83
  freeu=False,
84
  clip_skip=False,
85
  ip_face=False,
 
143
  deepcache,
144
  scale,
145
  karras,
 
146
  freeu,
147
  progress,
148
  )
lib/loader.py CHANGED
@@ -4,7 +4,7 @@ from threading import Lock
4
  import torch
5
  from DeepCache import DeepCacheSDHelper
6
  from diffusers import ControlNetModel
7
- from diffusers.models import AutoencoderKL, AutoencoderTiny
8
  from diffusers.models.attention_processor import AttnProcessor2_0, IPAdapterAttnProcessor2_0
9
 
10
  from .config import Config
@@ -29,20 +29,6 @@ class Loader:
29
  cls._instance.log = Logger("Loader")
30
  return cls._instance
31
 
32
- @property
33
- def _is_kl_vae(self):
34
- if self.pipe is not None:
35
- vae_type = type(self.pipe.vae)
36
- return issubclass(vae_type, AutoencoderKL)
37
- return False
38
-
39
- @property
40
- def _is_tiny_vae(self):
41
- if self.pipe is not None:
42
- vae_type = type(self.pipe.vae)
43
- return issubclass(vae_type, AutoencoderTiny)
44
- return False
45
-
46
  @property
47
  def _has_freeu(self):
48
  if self.pipe is not None:
@@ -184,6 +170,7 @@ class Loader:
184
  to_unload.append("model")
185
  to_unload.append("pipe")
186
 
 
187
  clear_cuda_cache()
188
  for component in to_unload:
189
  setattr(self, component, None)
@@ -284,32 +271,22 @@ class Loader:
284
  if self.pipe is not None:
285
  self.pipe.set_progress_bar_config(disable=progress is not None)
286
 
287
- def _load_vae(self, taesd=False, model=""):
288
- # by default all models use KL
289
- if self._is_kl_vae and taesd:
290
- msg = "Loading Tiny VAE"
291
- with timer(msg, logger=self.log.info):
292
- self.pipe.vae = AutoencoderTiny.from_pretrained(
293
- pretrained_model_name_or_path="madebyollin/taesd",
294
  torch_dtype=self.pipe.dtype,
295
  ).to(self.pipe.device)
296
- return
297
-
298
- if self._is_tiny_vae and not taesd:
299
- msg = "Loading KL VAE"
300
- with timer(msg, logger=self.log.info):
301
- if model.lower() in Config.MODEL_CHECKPOINTS.keys():
302
- self.pipe.vae = AutoencoderKL.from_single_file(
303
- f"https://huggingface.co/{model}/{Config.MODEL_CHECKPOINTS[model.lower()]}",
304
- torch_dtype=self.pipe.dtype,
305
- ).to(self.pipe.device)
306
- else:
307
- self.pipe.vae = AutoencoderKL.from_pretrained(
308
- pretrained_model_name_or_path=model,
309
- torch_dtype=self.pipe.dtype,
310
- subfolder="vae",
311
- variant="fp16",
312
- ).to(self.pipe.device)
313
 
314
  def load(
315
  self,
@@ -321,7 +298,6 @@ class Loader:
321
  deepcache,
322
  scale,
323
  karras,
324
- taesd,
325
  freeu,
326
  progress,
327
  ):
@@ -397,11 +373,12 @@ class Loader:
397
  if not same_scheduler or not same_karras:
398
  self.pipe.scheduler = Config.SCHEDULERS[scheduler](**scheduler_kwargs)
399
 
 
 
 
400
  CURRENT_STEP = 1
401
  TOTAL_STEPS = sum(
402
  [
403
- self._is_kl_vae and taesd,
404
- self._is_tiny_vae and not taesd,
405
  self._should_load_freeu(freeu),
406
  self._should_load_deepcache(deepcache),
407
  self._should_load_ip_adapter(ip_adapter),
@@ -428,8 +405,3 @@ class Loader:
428
  if self._should_load_upscaler(scale):
429
  self._load_upscaler(scale)
430
  safe_progress(progress, CURRENT_STEP, TOTAL_STEPS, desc)
431
- CURRENT_STEP += 1
432
-
433
- if self._is_kl_vae and taesd or self._is_tiny_vae and not taesd:
434
- self._load_vae(taesd, model)
435
- safe_progress(progress, CURRENT_STEP, TOTAL_STEPS, desc)
 
4
  import torch
5
  from DeepCache import DeepCacheSDHelper
6
  from diffusers import ControlNetModel
7
+ from diffusers.models import AutoencoderKL
8
  from diffusers.models.attention_processor import AttnProcessor2_0, IPAdapterAttnProcessor2_0
9
 
10
  from .config import Config
 
29
  cls._instance.log = Logger("Loader")
30
  return cls._instance
31
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
  @property
33
  def _has_freeu(self):
34
  if self.pipe is not None:
 
170
  to_unload.append("model")
171
  to_unload.append("pipe")
172
 
173
+ # Flush cache and run garbage collector
174
  clear_cuda_cache()
175
  for component in to_unload:
176
  setattr(self, component, None)
 
271
  if self.pipe is not None:
272
  self.pipe.set_progress_bar_config(disable=progress is not None)
273
 
274
+ # Handle single-file and diffusers-style models
275
+ def _load_vae(self, model=""):
276
+ msg = "Loading VAE"
277
+ with timer(msg, logger=self.log.info):
278
+ if model.lower() in Config.MODEL_CHECKPOINTS.keys():
279
+ self.pipe.vae = AutoencoderKL.from_single_file(
280
+ f"https://huggingface.co/{model}/{Config.MODEL_CHECKPOINTS[model.lower()]}",
281
  torch_dtype=self.pipe.dtype,
282
  ).to(self.pipe.device)
283
+ else:
284
+ self.pipe.vae = AutoencoderKL.from_pretrained(
285
+ pretrained_model_name_or_path=model,
286
+ torch_dtype=self.pipe.dtype,
287
+ subfolder="vae",
288
+ variant="fp16",
289
+ ).to(self.pipe.device)
 
 
 
 
 
 
 
 
 
 
290
 
291
  def load(
292
  self,
 
298
  deepcache,
299
  scale,
300
  karras,
 
301
  freeu,
302
  progress,
303
  ):
 
373
  if not same_scheduler or not same_karras:
374
  self.pipe.scheduler = Config.SCHEDULERS[scheduler](**scheduler_kwargs)
375
 
376
+ # Load VAE
377
+ self._load_vae(model)
378
+
379
  CURRENT_STEP = 1
380
  TOTAL_STEPS = sum(
381
  [
 
 
382
  self._should_load_freeu(freeu),
383
  self._should_load_deepcache(deepcache),
384
  self._should_load_ip_adapter(ip_adapter),
 
405
  if self._should_load_upscaler(scale):
406
  self._load_upscaler(scale)
407
  safe_progress(progress, CURRENT_STEP, TOTAL_STEPS, desc)