# Diffusers Tools

This is a collection of scripts that can be useful for various tasks related to the `diffusers` library.

## 1. Test against original checkpoints
It is very important to get visually the exact same results as the original code bases. For example, to make sure that `diffusers` is identical to the original CompVis codebase, proceed as follows:
- Download the original SD-1-4 checkpoint and put it in the correct folder following the instructions on https://github.com/CompVis/stable-diffusion (one way to fetch the checkpoint is sketched after the command below).
- From the root of the CompVis codebase, run the following command:

```
python scripts/txt2img.py --prompt "a photograph of an astronaut riding a horse" --seed 0 --n_samples 1 --n_rows 1 --n_iter 1
```
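If you do not yet have the checkpoint locally, the following is a minimal sketch for fetching it with `huggingface_hub`, assuming you have accepted the license of the gated `CompVis/stable-diffusion-v-1-4-original` repository and that the CompVis codebase expects the file at `models/ldm/stable-diffusion-v1/model.ckpt`:

```python
# Sketch: download the original SD-1-4 checkpoint via huggingface_hub.
# Assumes access to the gated "CompVis/stable-diffusion-v-1-4-original" repo and
# that the CompVis codebase looks for models/ldm/stable-diffusion-v1/model.ckpt.
import shutil
from pathlib import Path

from huggingface_hub import hf_hub_download

ckpt_path = hf_hub_download(
    repo_id="CompVis/stable-diffusion-v-1-4-original",
    filename="sd-v1-4.ckpt",
)

# Run from the root of the CompVis repository.
target = Path("models/ldm/stable-diffusion-v1/model.ckpt")
target.parent.mkdir(parents=True, exist_ok=True)
shutil.copy(ckpt_path, target)
```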
Then compare the resulting image to the output of the equivalent code in `diffusers`:
```python
from diffusers import DiffusionPipeline, DDIMScheduler
import torch

# Equivalent of:
# python scripts/txt2img.py --prompt "a photograph of an astronaut riding a horse" --seed 0 --n_samples 1 --n_rows 1 --n_iter 1
seed = 0
prompt = "a photograph of an astronaut riding a horse"

pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

torch.manual_seed(seed)
image = pipe(prompt, num_inference_steps=50).images[0]
image.save("/home/patrick_huggingface_co/images/aa_comp.png")
```
On a V100, both commands should produce the same image.
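To go beyond eyeballing the two outputs, you can also diff the saved images numerically. A minimal sketch, where the two file paths are placeholders for wherever you saved the CompVis and `diffusers` outputs:

```python
# Sketch: compare the CompVis and diffusers outputs pixel by pixel.
# Both file paths are placeholders for your own output locations.
import numpy as np
from PIL import Image

compvis_image = np.asarray(Image.open("compvis_output.png"), dtype=np.int16)
diffusers_image = np.asarray(Image.open("diffusers_output.png"), dtype=np.int16)

diff = np.abs(compvis_image - diffusers_image)
print(f"max abs pixel diff: {diff.max()}, mean abs pixel diff: {diff.mean():.2f}")
```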
## 2. Test against k-diffusion

You can run the following script to compare the `diffusers` schedulers against their k-diffusion counterparts.
See results here
```python
from diffusers import (
    DPMSolverMultistepScheduler,
    EulerDiscreteScheduler,
    HeunDiscreteScheduler,
    LMSDiscreteScheduler,
    StableDiffusionKDiffusionPipeline,
    StableDiffusionPipeline,
)
import torch
import os

seed = 13
inference_steps = 25
# checkpoint = "CompVis/stable-diffusion-v1-4"
checkpoint = "stabilityai/stable-diffusion-2-1"

prompts = ["astronaut riding horse", "whale falling from sky", "magical forest", "highly photorealistic picture of johnny depp"]
prompts = 8 * ["highly photorealistic picture of johnny depp"]
# prompts = prompts[:1]

samplers = ["sample_dpmpp_2m", "sample_euler", "sample_heun", "sample_dpm_2", "sample_lms"]
# samplers = samplers[:1]

# 1) Generate images with the k-diffusion samplers via the k-diffusion pipeline.
pipe = StableDiffusionKDiffusionPipeline.from_pretrained(checkpoint, torch_dtype=torch.float16, safety_checker=None)
pipe = pipe.to("cuda")

for i, prompt in enumerate(prompts):
    prompt_f = f"{'_'.join(prompt.split())}_{i}"
    for sampler in samplers:
        pipe.set_scheduler(sampler)

        torch.manual_seed(seed + i)
        image = pipe(prompt, num_inference_steps=inference_steps).images[0]

        checkpoint_f = f"{'--'.join(checkpoint.split('/'))}"
        os.makedirs(f"/home/patrick_huggingface_co/images/{checkpoint_f}", exist_ok=True)
        os.makedirs(f"/home/patrick_huggingface_co/images/{checkpoint_f}/{sampler}", exist_ok=True)
        image.save(f"/home/patrick_huggingface_co/images/{checkpoint_f}/{sampler}/{prompt_f}.png")

# 2) Generate the same images with the corresponding native diffusers schedulers.
pipe = StableDiffusionPipeline(**pipe.components)
pipe = pipe.to("cuda")

for i, prompt in enumerate(prompts):
    prompt_f = f"{'_'.join(prompt.split())}_{i}"
    for sampler in samplers:
        if sampler == "sample_euler":
            pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        elif sampler == "sample_heun":
            pipe.scheduler = HeunDiscreteScheduler.from_config(pipe.scheduler.config)
        elif sampler == "sample_dpmpp_2m":
            pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        elif sampler == "sample_lms":
            pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        # Note: "sample_dpm_2" has no branch above, so the previously set scheduler is reused.

        torch.manual_seed(seed + i)
        image = pipe(prompt, num_inference_steps=inference_steps).images[0]

        checkpoint_f = f"{'--'.join(checkpoint.split('/'))}"
        os.makedirs(f"/home/patrick_huggingface_co/images/{checkpoint_f}", exist_ok=True)
        os.makedirs(f"/home/patrick_huggingface_co/images/{checkpoint_f}/{sampler}", exist_ok=True)
        image.save(f"/home/patrick_huggingface_co/images/{checkpoint_f}/{sampler}/{prompt_f}_hf.png")
```