import os

import ldm_patched.modules.sd
import ldm_patched.modules.utils

def first_file(path, filenames):
    # Return the path of the first candidate filename that exists under
    # `path`, or None if no candidate is present.
    for f in filenames:
        p = os.path.join(path, f)
        if os.path.exists(p):
            return p
    return None

def load_diffusers(model_path, output_vae=True, output_clip=True, embedding_directory=None):
    # Candidate weight filenames for the UNet and VAE, checked in order of
    # preference: fp16 safetensors, full-precision safetensors, then .bin.
    diffusion_model_names = ["diffusion_pytorch_model.fp16.safetensors", "diffusion_pytorch_model.safetensors", "diffusion_pytorch_model.fp16.bin", "diffusion_pytorch_model.bin"]
    unet_path = first_file(os.path.join(model_path, "unet"), diffusion_model_names)
    vae_path = first_file(os.path.join(model_path, "vae"), diffusion_model_names)

    text_encoder_model_names = ["model.fp16.safetensors", "model.safetensors", "pytorch_model.fp16.bin", "pytorch_model.bin"]
    text_encoder1_path = first_file(os.path.join(model_path, "text_encoder"), text_encoder_model_names)
    # A second text encoder (text_encoder_2) exists in SDXL-style checkpoints and is optional.
    text_encoder2_path = first_file(os.path.join(model_path, "text_encoder_2"), text_encoder_model_names)

    text_encoder_paths = [text_encoder1_path]
    if text_encoder2_path is not None:
        text_encoder_paths.append(text_encoder2_path)

    unet = ldm_patched.modules.sd.load_unet(unet_path)

    clip = None
    if output_clip:
        clip = ldm_patched.modules.sd.load_clip(text_encoder_paths, embedding_directory=embedding_directory)

    vae = None
    if output_vae:
        # load_torch_file comes from ldm_patched.modules.utils (imported above).
        sd = ldm_patched.modules.utils.load_torch_file(vae_path)
        vae = ldm_patched.modules.sd.VAE(sd=sd)

    return (unet, clip, vae)
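
# Usage sketch (an illustrative addition, not part of the upstream module):
# the checkpoint path below is a hypothetical placeholder; any local
# diffusers-format directory with unet/, vae/, and text_encoder/
# subfolders (plus text_encoder_2/ for SDXL) should work.
if __name__ == "__main__":
    unet, clip, vae = load_diffusers(
        "./models/diffusers/example-checkpoint",  # assumed example path
        output_vae=True,
        output_clip=True,
        embedding_directory=None,
    )
    print("Loaded:", unet, clip, vae)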