import gradio as gr
import os

import torch
from diffusers import AutoencoderTiny
from torchvision.transforms.functional import to_pil_image, to_tensor

# Use CUDA if available, otherwise Apple MPS, otherwise CPU.
# MPS runs in float32 here; CUDA/CPU use float16.
device = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'
d_type = torch.float32 if device == 'mps' else torch.float16

# TAESD: a tiny autoencoder that decodes Stable Diffusion latents quickly.
model_id = "madebyollin/taesd"
vae = AutoencoderTiny.from_pretrained(model_id, use_safetensors=True, torch_dtype=d_type).to(device)

def decode(image):
    # The uploaded PNG stores TAESD latents rescaled into [0, 1];
    # undo that scaling, then run the decoder to reconstruct the image.
    t = to_tensor(image).unsqueeze(0).to(device, dtype=d_type)
    unscaled_t = vae.unscale_latents(t)
    reconstructed = vae.decoder(unscaled_t).clamp(0, 1)
    return to_pil_image(reconstructed[0])

# Default input: a 64x64 RGBA PNG holding the 4-channel latents of a 512x512 image.
astronaut = os.path.join(os.path.dirname(__file__), "images/21.encoded.png")

def app():
    return gr.Interface(decode,
                        gr.Image(type="pil",
                                 image_mode="RGBA",
                                 mirror_webcam=False,
                                 label='64x64',
                                 value=astronaut),
                        gr.Image(type="pil",
                                 image_mode="RGB",
                                 label='512x512',
                                 height=256,
                                 width=256),
                        examples=[
                            os.path.join(os.path.dirname(__file__), "images/18.encoded.png"),
                            os.path.join(os.path.dirname(__file__), "images/20.encoded.png")
                        ])

if __name__ == "__main__":
    app().launch(share=True)
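
The app only ships the decoder path; the images/*.encoded.png inputs have to be produced separately. Below is a minimal sketch of that encoder side, assuming a local 512x512 source image ("astronaut.png" is a hypothetical filename) and the same madebyollin/taesd checkpoint: the raw TAESD latents are mapped into [0, 1] with scale_latents and saved as a 4-channel (RGBA) PNG, which is exactly what decode above reverses.

import torch
from PIL import Image
from diffusers import AutoencoderTiny
from torchvision.transforms.functional import to_pil_image, to_tensor

vae = AutoencoderTiny.from_pretrained("madebyollin/taesd", use_safetensors=True)

# "astronaut.png" is a hypothetical source image; it should be RGB and 512x512.
image = Image.open("astronaut.png").convert("RGB").resize((512, 512))
x = to_tensor(image).unsqueeze(0)          # (1, 3, 512, 512), values in [0, 1]

with torch.no_grad():
    latents = vae.encoder(x)               # (1, 4, 64, 64) raw TAESD latents
scaled = vae.scale_latents(latents)        # raw latents -> [0, 1], ready for PNG storage

to_pil_image(scaled[0]).save("astronaut.encoded.png")  # 4 channels are saved as RGBA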