import gradio as gr
import os
import torch
from diffusers import AutoencoderTiny
from torchvision.transforms.functional import to_pil_image, center_crop, resize, to_tensor

# Run on CUDA if available, otherwise Apple MPS, otherwise CPU.
device = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'
# Use float32 on MPS, float16 elsewhere.
d_type = torch.float32 if device == 'mps' else torch.float16

# Tiny AutoEncoder for Stable Diffusion (TAESD).
model_id = "madebyollin/taesd"
vae = AutoencoderTiny.from_pretrained(model_id, use_safetensors=True, torch_dtype=d_type).to(device)

@torch.no_grad()
def decode(image):
    # The input PNG stores the 4-channel TAESD latents as a 64x64 RGBA image in [0, 1].
    t = to_tensor(image).unsqueeze(0).to(device, dtype=d_type)
    # Undo the [0, 1] scaling, then run the tiny decoder and clamp to a valid image range.
    unscaled_t = vae.unscale_latents(t)
    reconstructed = vae.decoder(unscaled_t).clamp(0, 1)
    return to_pil_image(reconstructed[0])
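
# Sketch (assumption, not part of the original file): the "*.encoded.png" inputs could be
# produced by the inverse of decode() — encode a 512x512 RGB image with the TAESD encoder,
# scale the raw latents into [0, 1], and save the 4-channel result as a 64x64 RGBA PNG.
# The helper name `encode` is hypothetical.
@torch.no_grad()
def encode(image):
    t = to_tensor(image).unsqueeze(0).to(device, dtype=d_type)  # 1x3x512x512 in [0, 1]
    latents = vae.encoder(t)                                    # 1x4x64x64 raw latents
    scaled = vae.scale_latents(latents).clamp(0, 1)             # map into [0, 1] for PNG storage
    return to_pil_image(scaled[0])                              # 4 channels -> RGBA PIL image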

# Default input: a pre-encoded 64x64 RGBA latent image bundled with the Space.
astronaut = os.path.join(os.path.dirname(__file__), "images/21.encoded.png")

def app():
    # Input: a 64x64 RGBA image holding TAESD latents; output: the decoded 512x512 RGB image.
    return gr.Interface(decode,
                        gr.Image(type="pil",
                                 image_mode="RGBA",
                                 mirror_webcam=False,
                                 label='64x64',
                                 value=astronaut),
                        gr.Image(type="pil",
                                 image_mode="RGB",
                                 label='512x512',
                                 height=256,
                                 width=256),
                        examples=[
                            os.path.join(os.path.dirname(__file__), "images/18.encoded.png"),
                            os.path.join(os.path.dirname(__file__), "images/20.encoded.png")
                        ])

if __name__ == "__main__":
    app().launch(share=True)