|
|
|
import numpy as np
import PIL
import PIL.Image
import torch
import tqdm

from diffusers import UNetModel, DDIMScheduler
|
|
|
# Seed the global PyTorch RNG and keep the returned Generator handle so the
# random draws below are reproducible run-to-run.
generator = torch.manual_seed(0)

# Prefer the GPU when one is available; everything below follows this device.
torch_device = "cuda" if torch.cuda.is_available() else "cpu"


# Load the DDIM noise scheduler and the pretrained UNet for the
# "fusing/ddpm-celeba-hq" checkpoint (fetched from the Hugging Face hub).
# NOTE(review): this is a legacy diffusers API — `tensor_format="pt"`
# presumably selects PyTorch tensors; confirm against the installed version.
noise_scheduler = DDIMScheduler.from_config("fusing/ddpm-celeba-hq", tensor_format="pt")
unet = UNetModel.from_pretrained("fusing/ddpm-celeba-hq").to(torch_device)
|
|
|
|
|
# Draw the initial Gaussian latent on the CPU (so the seeded generator is
# consumed deterministically), then move it to the compute device.
sample_shape = (1, unet.in_channels, unet.resolution, unet.resolution)
image = torch.randn(sample_shape, generator=generator).to(torch_device)

# Sampling hyper-parameters: 50 DDIM steps; eta = 0.0 makes the reverse
# process fully deterministic (no extra noise injected per step).
num_inference_steps = 50
eta = 0.0
|
|
|
# DDIM reverse (denoising) loop: walk the inference schedule from the
# noisiest step down to step 0, refining `image` at every iteration.
step_stride = len(noise_scheduler) // num_inference_steps
for t in tqdm.tqdm(range(num_inference_steps - 1, -1, -1), total=num_inference_steps):

    # Map the coarse inference step onto the scheduler's full timestep range.
    scheduler_t = step_stride * t

    # Predict the noise residual; no gradients are needed for sampling.
    with torch.no_grad():
        residual = unet(image, scheduler_t)

    # Deterministic DDIM update towards the previous (less noisy) sample.
    pred_prev_image = noise_scheduler.step(residual, image, t, num_inference_steps, eta)

    # With eta > 0 the sampler becomes stochastic: add freshly drawn noise
    # scaled by the scheduler's per-step variance. With eta == 0 (the default
    # above) the update is purely deterministic.
    if eta > 0:
        noise = torch.randn(image.shape, generator=generator).to(image.device)
        image = pred_prev_image + noise_scheduler.get_variance(t).sqrt() * eta * noise
    else:
        image = pred_prev_image
|
|
|
|
|
# Convert the final sample from [-1, 1] NCHW tensor to an 8-bit RGB PIL
# image and write it to disk.
image_processed = image.cpu().permute(0, 2, 3, 1)  # NCHW -> NHWC for PIL
image_processed = (image_processed + 1.0) * 127.5
# Clamp before the uint8 cast: sampled values routinely fall slightly
# outside [-1, 1], and without clamping the cast wraps around
# (e.g. -1.01 maps to 255 instead of saturating at 0), producing speckles.
image_processed = image_processed.clamp(0, 255)
image_processed = image_processed.numpy().astype(np.uint8)
image_pil = PIL.Image.fromarray(image_processed[0])

image_pil.save("test.png")
|
|