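# Gradio demo for Paella: text-to-image generation with a diffusion prior over CLIP image
# embeddings and a token-space generator whose output is decoded by an f8 VQGAN.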
import gradio as gr
import open_clip
import torch
from PIL import Image
from open_clip import tokenizer
from rudalle import get_vae
from einops import rearrange
from huggingface_hub import hf_hub_download
from modules import DenoiseUNet
from arroz import Diffuzz, PriorModel
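
# Checkpoint locations, device selection, and sampling hyperparameters for the prior and the token generator.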
model_repo = "pcuenq/Arroz_con_cosas"
model_file = "model_1b_img.pt"
prior_file = "prior_v1_1500k_ema_fp16.pt"
device = "cuda" if torch.cuda.is_available() else "cpu"
device_text = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
batch_size = 4
latent_shape = (64, 64)
generator_timesteps = 12
generator_cfg = 5
prior_timesteps = 60
prior_cfg = 3.0
prior_sampler = 'ddpm'
clip_embedding_shape = (batch_size, 1024)
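
# Convert a batch of image tensors in [0, 1] with shape (B, C, H, W) into a list of PIL images.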
def to_pil(images):
    images = images.permute(0, 2, 3, 1).cpu().numpy()
    images = (images * 255).round().astype("uint8")
    images = [Image.fromarray(image) for image in images]
    return images
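
# Gumbel-max sampling helpers (an alternative to the multinomial sampling used in `sample` below).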
def log(t, eps=1e-20):
    return torch.log(t + eps)

def gumbel_noise(t):
    noise = torch.zeros_like(t).uniform_(0, 1)
    return -log(-log(noise))

def gumbel_sample(t, temperature=1., dim=-1):
    return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim=dim)
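
# Iteratively denoise a grid of VQGAN token indices conditioned on the CLIP embeddings `c`.
# Each step predicts logits for every token, optionally applies typical filtering and
# classifier-free guidance, samples new tokens, and re-noises them for the next step.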
def sample(model, c, x=None, negative_embeddings=None, mask=None, T=12, size=(32, 32), starting_t=0, temp_range=[1.0, 1.0], typical_filtering=True, typical_mass=0.2, typical_min_tokens=1, classifier_free_scale=-1, renoise_steps=11, renoise_mode='start'):
    with torch.inference_mode():
        r_range = torch.linspace(0, 1, T+1)[:-1][:, None].expand(-1, c.size(0)).to(c.device)
        temperatures = torch.linspace(temp_range[0], temp_range[1], T)
        preds = []
        if x is None:
            x = torch.randint(0, model.num_labels, size=(c.size(0), *size), device=c.device)
        elif mask is not None:
            noise = torch.randint(0, model.num_labels, size=(c.size(0), *size), device=c.device)
            x = noise * mask + (1-mask) * x
        init_x = x.clone()
        for i in range(starting_t, T):
            if renoise_mode == 'prev':
                prev_x = x.clone()
            r, temp = r_range[i], temperatures[i]
            logits = model(x, c, r)
            if classifier_free_scale >= 0:
                if negative_embeddings is not None:
                    logits_uncond = model(x, negative_embeddings, r)
                else:
                    logits_uncond = model(x, torch.zeros_like(c), r)
                logits = torch.lerp(logits_uncond, logits, classifier_free_scale)
            x = logits
            x_flat = x.permute(0, 2, 3, 1).reshape(-1, x.size(1))
            if typical_filtering:
                x_flat_norm = torch.nn.functional.log_softmax(x_flat, dim=-1)
                x_flat_norm_p = torch.exp(x_flat_norm)
                entropy = -(x_flat_norm * x_flat_norm_p).nansum(-1, keepdim=True)
                c_flat_shifted = torch.abs((-x_flat_norm) - entropy)
                c_flat_sorted, x_flat_indices = torch.sort(c_flat_shifted, descending=False)
                x_flat_cumsum = x_flat.gather(-1, x_flat_indices).softmax(dim=-1).cumsum(dim=-1)
                last_ind = (x_flat_cumsum < typical_mass).sum(dim=-1)
                sorted_indices_to_remove = c_flat_sorted > c_flat_sorted.gather(1, last_ind.view(-1, 1))
                if typical_min_tokens > 1:
                    sorted_indices_to_remove[..., :typical_min_tokens] = 0
                indices_to_remove = sorted_indices_to_remove.scatter(1, x_flat_indices, sorted_indices_to_remove)
                x_flat = x_flat.masked_fill(indices_to_remove, -float("Inf"))
            x_flat = torch.multinomial(x_flat.div(temp).softmax(-1), num_samples=1)[:, 0]
            x = x_flat.view(x.size(0), *x.shape[2:])
            if mask is not None:
                x = x * mask + (1-mask) * init_x
            if i < renoise_steps:
                if renoise_mode == 'start':
                    x, _ = model.add_noise(x, r_range[i+1], random_x=init_x)
                elif renoise_mode == 'prev':
                    x, _ = model.add_noise(x, r_range[i+1], random_x=prev_x)
                else:  # 'rand'
                    x, _ = model.add_noise(x, r_range[i+1])
            preds.append(x.detach())
    return preds
# Model loading
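# f8 VQGAN from rudalle: decodes 64x64 grids of discrete tokens into 512x512 RGB images.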
vqmodel = get_vae().to(device)
vqmodel.eval().requires_grad_(False)
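# OpenCLIP ViT-H/14 (LAION-2B) text encoder: produces the 1024-dim text embeddings fed to the prior.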
clip_model, _, _ = open_clip.create_model_and_transforms('ViT-H-14', pretrained='laion2b_s32b_b79k')
clip_model = clip_model.to(device).eval().requires_grad_(False)
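
# Pixel <-> token helpers for the VQGAN: `encode` maps images to code indices, `decode` maps indices back to images in [0, 1].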
def encode(x):
    return vqmodel.model.encode((2 * x - 1))[-1][-1]

def decode(img_seq, shape=(32,32)):
    img_seq = img_seq.view(img_seq.shape[0], -1)
    b, n = img_seq.shape
    one_hot_indices = torch.nn.functional.one_hot(img_seq, num_classes=vqmodel.num_tokens).float()
    z = (one_hot_indices @ vqmodel.model.quantize.embed.weight)
    z = rearrange(z, 'b (h w) c -> b c h w', h=shape[0], w=shape[1])
    img = vqmodel.model.decode(z)
    img = (img.clamp(-1., 1.) + 1) * 0.5
    return img
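
# Load the token generator (DenoiseUNet) and the diffusion prior, both in half precision.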
model_path = hf_hub_download(repo_id=model_repo, filename=model_file)
model = DenoiseUNet(num_labels=8192, c_clip=1024, c_hidden=1280, down_levels=[1, 2, 8, 32], up_levels=[32, 8, 2, 1])
model = model.to(device).half()
model.load_state_dict(torch.load(model_path, map_location=device))
model.eval().requires_grad_(False)
prior_path = hf_hub_download(repo_id=model_repo, filename=prior_file)
prior = PriorModel().to(device).half()
prior.load_state_dict(torch.load(prior_path, map_location=device))
prior.eval().requires_grad_(False)
diffuzz = Diffuzz(device=device)
# -----
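# Full pipeline: tokenize the prompts, encode them with CLIP, sample CLIP image embeddings
# with the diffusion prior, generate VQGAN tokens with the Paella generator, then decode to pixels.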
def infer(prompt, negative_prompt):
    tokenized_text = tokenizer.tokenize([prompt] * batch_size).to(device)
    negative_text = tokenizer.tokenize([negative_prompt] * batch_size).to(device)
    with torch.inference_mode():
        with torch.autocast(device_type="cuda"):
            clip_embeddings = clip_model.encode_text(tokenized_text)
            neg_clip_embeddings = clip_model.encode_text(negative_text)
            sampled_image_embeddings = diffuzz.sample(
                prior, {'c': clip_embeddings}, clip_embedding_shape,
                timesteps=prior_timesteps, cfg=prior_cfg, sampler=prior_sampler
            )[-1]
            images = sample(
                model, sampled_image_embeddings, negative_embeddings=neg_clip_embeddings,
                T=generator_timesteps, size=latent_shape, starting_t=0, temp_range=[2.0, 0.1],
                typical_filtering=False, typical_mass=0.2, typical_min_tokens=1,
                classifier_free_scale=generator_cfg, renoise_steps=generator_timesteps-1,
                renoise_mode="start"
            )
        images = decode(images[-1], latent_shape)
    return to_pil(images)
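
# Custom CSS for the Gradio interface.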
css = """
.gradio-container {
font-family: 'IBM Plex Sans', sans-serif;
}
.gr-button {
color: white;
border-color: black;
background: black;
}
input[type='range'] {
accent-color: black;
}
.dark input[type='range'] {
accent-color: #dfdfdf;
}
.container {
max-width: 730px;
margin: auto;
padding-top: 1.5rem;
}
#gallery {
min-height: 22rem;
margin-bottom: 15px;
margin-left: auto;
margin-right: auto;
border-bottom-right-radius: .5rem !important;
border-bottom-left-radius: .5rem !important;
}
#gallery>div>.h-full {
min-height: 20rem;
}
.details:hover {
text-decoration: underline;
}
.gr-button {
white-space: nowrap;
}
.gr-button:focus {
border-color: rgb(147 197 253 / var(--tw-border-opacity));
outline: none;
box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
--tw-border-opacity: 1;
--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
--tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
--tw-ring-opacity: .5;
}
.footer {
margin-bottom: 45px;
margin-top: 35px;
text-align: center;
border-bottom: 1px solid #e5e5e5;
}
.footer>p {
font-size: .8rem;
display: inline-block;
padding: 0 10px;
transform: translateY(10px);
background: white;
}
.dark .footer {
border-color: #303030;
}
.dark .footer>p {
background: #0b0f19;
}
.acknowledgments h4{
margin: 1.25em 0 .25em 0;
font-weight: bold;
font-size: 115%;
}
.animate-spin {
animation: spin 1s linear infinite;
}
@keyframes spin {
from {
transform: rotate(0deg);
}
to {
transform: rotate(360deg);
}
}
#share-btn-container {
display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
}
#share-btn {
all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;
}
#share-btn * {
all: unset;
}
.gr-form{
flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
}
#prompt-container{
gap: 0;
}
"""
block = gr.Blocks(css=css)
with block:
    gr.HTML(
f"""
<div style="text-align: center; max-width: 650px; margin: 0 auto;">
<div
style="
display: inline-flex;
align-items: center;
gap: 0.8rem;
font-size: 1.75rem;
"
>
<svg
width="0.65em"
height="0.65em"
viewBox="0 0 115 115"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<rect width="23" height="23" fill="white"></rect>
<rect y="69" width="23" height="23" fill="white"></rect>
<rect x="23" width="23" height="23" fill="#AEAEAE"></rect>
<rect x="23" y="69" width="23" height="23" fill="#AEAEAE"></rect>
<rect x="46" width="23" height="23" fill="white"></rect>
<rect x="46" y="69" width="23" height="23" fill="white"></rect>
<rect x="69" width="23" height="23" fill="black"></rect>
<rect x="69" y="69" width="23" height="23" fill="black"></rect>
<rect x="92" width="23" height="23" fill="#D9D9D9"></rect>
<rect x="92" y="69" width="23" height="23" fill="#AEAEAE"></rect>
<rect x="115" y="46" width="23" height="23" fill="white"></rect>
<rect x="115" y="115" width="23" height="23" fill="white"></rect>
<rect x="115" y="69" width="23" height="23" fill="#D9D9D9"></rect>
<rect x="92" y="46" width="23" height="23" fill="#AEAEAE"></rect>
<rect x="92" y="115" width="23" height="23" fill="#AEAEAE"></rect>
<rect x="92" y="69" width="23" height="23" fill="white"></rect>
<rect x="69" y="46" width="23" height="23" fill="white"></rect>
<rect x="69" y="115" width="23" height="23" fill="white"></rect>
<rect x="69" y="69" width="23" height="23" fill="#D9D9D9"></rect>
<rect x="46" y="46" width="23" height="23" fill="black"></rect>
<rect x="46" y="115" width="23" height="23" fill="black"></rect>
<rect x="46" y="69" width="23" height="23" fill="black"></rect>
<rect x="23" y="46" width="23" height="23" fill="#D9D9D9"></rect>
<rect x="23" y="115" width="23" height="23" fill="#AEAEAE"></rect>
<rect x="23" y="69" width="23" height="23" fill="black"></rect>
</svg>
<h1 style="font-weight: 900; margin-bottom: 7px;">
Paella Demo
</h1>
</div>
<p>
Running on <b>{device_text}</b>
</p>
<p style="margin-bottom: 10px; font-size: 94%">
Paella is a novel text-to-image model that uses a compressed quantized latent space, based on an f8 VQGAN, and a masked training objective to achieve fast generation in ~10 inference steps.
</p>
</div>
"""
    )
    with gr.Group():
        with gr.Box():
            with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
                with gr.Column():
                    text = gr.Textbox(
                        label="Enter your prompt",
                        show_label=False,
                        max_lines=1,
                        placeholder="Enter your prompt",
                        elem_id="prompt-text-input",
                    ).style(
                        border=(True, False, True, True),
                        rounded=(True, False, False, True),
                        container=False,
                    )
                    negative = gr.Textbox(
                        label="Enter your negative prompt",
                        show_label=False,
                        max_lines=1,
                        placeholder="Enter a negative prompt",
                        elem_id="negative-prompt-text-input",
                    ).style(
                        border=(True, False, True, True),
                        rounded=(True, False, False, True),
                        container=False,
                    )
                btn = gr.Button("Generate image").style(
                    margin=False,
                    rounded=(False, True, True, False),
                    full_width=False,
                )
        gallery = gr.Gallery(
            label="Generated images", show_label=False, elem_id="gallery"
        ).style(grid=[2], height="auto")
    text.submit(infer, inputs=[text, negative], outputs=gallery)
    btn.click(infer, inputs=[text, negative], outputs=gallery)
    gr.HTML(
"""
<div class="footer">
</div>
<div class="acknowledgments">
<p><h4>Resources</h4>
<a href="https://arxiv.org/abs/2211.07292" style="text-decoration: underline;">Paper</a>, <a href="https://github.com/dome272/Paella" style="text-decoration: underline;">official implementation</a>.
</p>
<p><h4>LICENSE</h4>
<a href="https://github.com/dome272/Paella/blob/main/LICENSE" style="text-decoration: underline;">MIT</a>.
</p>
<p><h4>Biases and content acknowledgment</h4>
As impressive as turning text into images is, be aware that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence. The model was trained on 600 million images from the improved <a href="https://laion.ai/blog/laion-5b/" style="text-decoration: underline;" target="_blank">LAION-5B aesthetic</a> dataset, which scraped non-curated image-text pairs from the internet (the exception being the removal of illegal content), and is meant for research purposes.
</p>
</div>
"""
    )

block.launch()