import os

import torch
import gradio as gr
from PIL import Image
from diffusers.models import AutoencoderKL
from huggingface_hub import snapshot_download

from diffusion import create_diffusion
from imagenet_class_data import IMAGENET_1K_CLASSES
from models import MDT_XL_2

def load_model(image_size=256):
    """Download the MDT-XL/2 checkpoint from the Hub and load it for inference."""
    assert image_size in [256]
    latent_size = image_size // 8  # the SD VAE downsamples images by 8x
    model = MDT_XL_2(input_size=latent_size, decode_layer=2).to(device)
    models_path = snapshot_download("shgao/MDT-XL2")
    ckpt_model_path = os.path.join(models_path, "mdt_xl2_v1_ckpt.pt")
    state_dict = torch.load(
        ckpt_model_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(state_dict)
    model.eval()
    return model

torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"

# Global state shared across requests; generate() reloads these when the user
# selects a different resolution or VAE decoder.
model = load_model(image_size=256)
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to(device)
current_image_size = 256
current_vae_model = "stabilityai/sd-vae-ft-mse"

def generate(image_size, vae_model, class_label, cfg_scale, pow_scale, num_sampling_steps, seed):
    n = 1  # the demo generates a single sample per request
    image_size = int(image_size.split("x")[0])

    # Reload the model if the requested resolution changed:
    global model, current_image_size
    if image_size != current_image_size:
        model = model.to("cpu")
        del model
        if device == "cuda":
            torch.cuda.empty_cache()
        model = load_model(image_size=image_size)
        current_image_size = image_size

    # Reload the VAE if the requested decoder changed:
    global vae, current_vae_model
    if vae_model != current_vae_model:
        if device == "cuda":
            vae.to("cpu")
        del vae
        vae = AutoencoderKL.from_pretrained(vae_model).to(device)
        current_vae_model = vae_model  # keep the cache marker in sync

    # Seed PyTorch:
    torch.manual_seed(int(seed))

    # Set up diffusion with the requested number of sampling steps:
    diffusion = create_diffusion(str(num_sampling_steps))

    # Create sampling noise in the latent space:
    latent_size = image_size // 8
    z = torch.randn(n, 4, latent_size, latent_size, device=device)
    y = torch.tensor([class_label] * n, device=device)

    # Set up classifier-free guidance: duplicate the batch and condition the
    # second half on the null class (label 1000):
    z = torch.cat([z, z], 0)
    y_null = torch.tensor([1000] * n, device=device)
    y = torch.cat([y, y_null], 0)
    model_kwargs = dict(y=y, cfg_scale=cfg_scale, scale_pow=pow_scale)

    # Sample images:
    samples = diffusion.p_sample_loop(
        model.forward_with_cfg, z.shape, z, clip_denoised=False,
        model_kwargs=model_kwargs, progress=True, device=device
    )
    samples, _ = samples.chunk(2, dim=0)  # remove the null-class samples
    samples = vae.decode(samples / 0.18215).sample

    # Convert to PIL.Image format:
    samples = samples.mul(127.5).add_(128.0).clamp_(
        0, 255).permute(0, 2, 3, 1).to("cpu", torch.uint8).numpy()
    samples = [Image.fromarray(sample) for sample in samples]
    return samples
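
# Usage sketch (hypothetical, not exercised by the demo UI): `generate` can be
# called headlessly; the class label is an integer index into
# IMAGENET_1K_CLASSES, matching the dropdown's type="index" output below.
#
#   label = list(IMAGENET_1K_CLASSES.values()).index("macaw")
#   images = generate("256x256", "stabilityai/sd-vae-ft-mse", label,
#                     cfg_scale=5.0, pow_scale=0.01, num_sampling_steps=300, seed=1)
#   images[0].save("macaw.png")
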
description = '''This is a demo of our MDT image generation model. MDT is a class-conditional diffusion model trained on ImageNet-1K.'''
duplicate = '''Skip the queue by duplicating this Space and upgrading to a GPU in Settings:
<a href="https://huggingface.co/spaces/wpeebles/DiT?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>'''
more_info = '''
# Masked Diffusion Transformer
[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/masked-diffusion-transformer-is-a-strong/image-generation-on-imagenet-256x256)](https://paperswithcode.com/sota/image-generation-on-imagenet-256x256?p=masked-diffusion-transformer-is-a-strong)
The official codebase for [Masked Diffusion Transformer is a Strong Image Synthesizer](https://arxiv.org/abs/2303.14389).
## Introduction
Despite their success in image synthesis, diffusion probabilistic models (DPMs) often lack the contextual reasoning ability to learn the relations among object parts in an image, which leads to a slow learning process.
To solve this issue, we propose the Masked Diffusion Transformer (MDT), which introduces a mask latent modeling scheme to explicitly enhance DPMs' ability to learn contextual relations among the semantic parts of objects in an image. During training, MDT masks certain tokens in the latent space. An asymmetric masking diffusion transformer is then trained to predict the masked tokens from the unmasked ones while maintaining the diffusion generation process. MDT can thus reconstruct the full information of an image from incomplete contextual input, which enables it to learn the associated relations among image tokens.
Experimental results show that MDT achieves superior image synthesis performance, e.g., a new SOTA FID score on ImageNet 256x256, and learns about 3× faster than the previous SOTA, DiT.
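As a rough sketch of the mask latent modeling idea (an illustration under assumed token shapes and mask ratio, not MDT's exact implementation), training hides a random subset of latent tokens, and the network must predict the hidden tokens from the visible ones:
```python
import torch

def random_token_mask(tokens, mask_ratio=0.3):
    # tokens: (batch, num_tokens, dim) latent patch tokens.
    b, n, d = tokens.shape
    num_keep = int(n * (1 - mask_ratio))
    idx = torch.rand(b, n).argsort(dim=1)   # a random permutation per sample
    keep = idx[:, :num_keep]                # indices of the visible tokens
    mask = torch.ones(b, n, dtype=torch.bool)
    mask.scatter_(1, keep, False)           # True where a token is hidden
    visible = torch.gather(tokens, 1, keep.unsqueeze(-1).expand(-1, -1, d))
    return visible, mask
```
The asymmetric transformer is trained to predict the masked tokens from the visible ones alongside the standard diffusion objective, so the sampling procedure at inference time is unchanged.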
## Citation
```
@misc{gao2023masked,
      title={Masked Diffusion Transformer is a Strong Image Synthesizer},
      author={Shanghua Gao and Pan Zhou and Ming-Ming Cheng and Shuicheng Yan},
      year={2023},
      eprint={2303.14389},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```
## Acknowledgement
This demo is based on [DiT](https://github.com/facebookresearch/dit). Thanks!
'''
project_links = '''
<p style="text-align: center">
<a href="https://arxiv.org/abs/2303.14389">Paper</a> ·
<a href="https://github.com/sail-sg/MDT">GitHub</a></p>'''
# Each row matches the `inputs` wired to the Examples component below:
# [image_size, vae_model, class label, cfg_scale, pow_scale, steps, seed].
examples = [
    ["256x256", "stabilityai/sd-vae-ft-mse",
     "Welsh springer spaniel", 5.0, 0.01, 300, 3000],
    ["256x256", "stabilityai/sd-vae-ft-mse",
     "golden retriever", 5.0, 0.01, 300, 3000],
    ["256x256", "stabilityai/sd-vae-ft-mse",
     "sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita", 5.0, 0.01, 300, 1],
    ["256x256", "stabilityai/sd-vae-ft-mse",
     "cheeseburger", 5.0, 0.01, 300, 2],
    ["256x256", "stabilityai/sd-vae-ft-mse", "macaw", 5.0, 0.01, 300, 1],
]
with gr.Blocks() as demo:
    gr.Markdown(
        "<h1 style='text-align: center'>Masked Diffusion Transformer (MDT)</h1>")
    gr.Markdown(project_links)
    gr.Markdown(description)
    gr.Markdown(duplicate)
    with gr.Tabs():
        with gr.TabItem('Generate'):
            with gr.Row():
                with gr.Column():
                    with gr.Row():
                        image_size = gr.Radio(
                            choices=["256x256"], value="256x256",
                            label='MDT Model Resolution')
                        vae_model = gr.Radio(
                            choices=["stabilityai/sd-vae-ft-mse",
                                     "stabilityai/sd-vae-ft-ema"],
                            value="stabilityai/sd-vae-ft-mse", label='VAE Decoder')
                    with gr.Row():
                        i1k_class = gr.Dropdown(
                            list(IMAGENET_1K_CLASSES.values()),
                            value='Welsh springer spaniel',
                            type="index", label='ImageNet-1K Class'
                        )
                        cfg_scale = gr.Slider(
                            minimum=0, maximum=25, step=0.1, value=5.0,
                            label='Classifier-free Guidance Scale')
                        pow_scale = gr.Slider(
                            minimum=0, maximum=25, step=0.1, value=0.01,
                            label='Classifier-free Guidance Weight Scaling')
                        steps = gr.Slider(
                            minimum=4, maximum=1000, step=1, value=300,
                            label='Sampling Steps')
                        # The demo generates one sample per request (n is fixed
                        # to 1 in generate()), so there is no sample-count input.
                        seed = gr.Number(value=30, precision=0, label='Seed')
                    button = gr.Button("Generate", variant="primary")
                with gr.Column():
                    output = gr.Gallery(label='Generated Images',
                                        columns=2, height="auto")
                    button.click(generate,
                                 inputs=[image_size, vae_model, i1k_class,
                                         cfg_scale, pow_scale, steps, seed],
                                 outputs=[output])
            with gr.Row():
                ex = gr.Examples(examples=examples, fn=generate,
                                 inputs=[image_size, vae_model, i1k_class,
                                         cfg_scale, pow_scale, steps, seed],
                                 outputs=[output],
                                 cache_examples=True)
    gr.Markdown(more_info)

demo.queue()
demo.launch()