import torch
import tqdm

from diffusers import DiffusionPipeline


class DDPM(DiffusionPipeline):
    def __init__(self, unet, noise_scheduler):
        super().__init__()
        # Register the denoising model and the noise scheduler so they are
        # saved and loaded together with the pipeline.
        self.register_modules(unet=unet, noise_scheduler=noise_scheduler)

    def __call__(self, batch_size=1, generator=None, torch_device=None):
        if torch_device is None:
            torch_device = "cuda" if torch.cuda.is_available() else "cpu"

        self.unet.to(torch_device)

        # Start from pure Gaussian noise with the shape the UNet expects.
        image = self.noise_scheduler.sample_noise(
            (batch_size, self.unet.in_channels, self.unet.resolution, self.unet.resolution),
            device=torch_device,
            generator=generator,
        )

        # Walk the diffusion timesteps in reverse (T-1, ..., 0), denoising the
        # image a little at every step.
        num_prediction_steps = len(self.noise_scheduler)
        for t in tqdm.tqdm(reversed(range(num_prediction_steps)), total=num_prediction_steps):
            # 1. Predict the noise residual at the current timestep.
            with torch.no_grad():
                residual = self.unet(image, t)

            # 2. Compute the predicted (less noisy) previous image from the residual.
            pred_prev_image = self.noise_scheduler.compute_prev_image_step(residual, image, t)

            # 3. For every step except the last (t == 0), add scaled noise back in,
            #    as the DDPM sampling procedure prescribes.
            variance = 0
            if t > 0:
                noise = self.noise_scheduler.sample_noise(image.shape, device=image.device, generator=generator)
                variance = self.noise_scheduler.get_variance(t).sqrt() * noise

            # 4. Take the sampling step.
            image = pred_prev_image + variance

        return image
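
# A minimal usage sketch. `my_unet` and `my_scheduler` are hypothetical stand-ins
# for any model/scheduler pair exposing the attributes and methods used above
# (.in_channels, .resolution, sample_noise(), compute_prev_image_step(),
# get_variance(), __len__()); they are not confirmed diffusers APIs.
#
#     pipeline = DDPM(unet=my_unet, noise_scheduler=my_scheduler)
#     images = pipeline(batch_size=4)  # tensor of shape (4, in_channels, resolution, resolution)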