import random

import gradio as gr
import numpy as np
import torch
from diffusers import LCMScheduler, PixArtAlphaPipeline, Transformer2DModel
from peft import PeftModel

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the PixArt-α transformer and attach the Flash Diffusion LoRA adapter.
transformer = Transformer2DModel.from_pretrained(
    "PixArt-alpha/PixArt-XL-2-1024-MS",
    subfolder="transformer",
    torch_dtype=torch.float16,
)
transformer = PeftModel.from_pretrained(
    transformer,
    "jasperai/flash-pixart",
)

# Build the pipeline around the distilled transformer. The LoRA is already
# attached via PeftModel above, so no separate load_lora_weights / fuse_lora
# call is needed.
pipe = PixArtAlphaPipeline.from_pretrained(
    "PixArt-alpha/PixArt-XL-2-1024-MS",
    transformer=transformer,
    torch_dtype=torch.float16,
)
if torch.cuda.is_available():
    pipe.enable_xformers_memory_efficient_attention()
pipe = pipe.to(device)

# Few-step sampling uses the LCM scheduler with trailing timestep spacing.
pipe.scheduler = LCMScheduler.from_pretrained(
    "PixArt-alpha/PixArt-XL-2-1024-MS",
    subfolder="scheduler",
    timestep_spacing="trailing",
)

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
NUM_INFERENCE_STEPS = 4


def infer(prompt, seed, randomize_seed, num_inference_steps=NUM_INFERENCE_STEPS):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed)
    # The distilled model is sampled without classifier-free guidance.
    image = pipe(
        prompt=prompt,
        guidance_scale=0,
        num_inference_steps=num_inference_steps,
        generator=generator,
    ).images[0]
    return image


examples = [
    "The image showcases a freshly baked bread, possibly focaccia, with rosemary sprigs and red pepper flakes sprinkled on top. It's sliced and placed on a wire cooling rack, with a bowl of mixed peppercorns beside it.",
    "A raccoon reading a book in a lush forest.",
    "A serene landscape showcases a winding road alongside a vast, turquoise lake, flanked by majestic snow-capped mountains under a partly cloudy sky.",
]

css = """
#col-container {
    margin: 0 auto;
    max-width: 512px;
}
"""

power_device = "GPU" if torch.cuda.is_available() else "CPU"

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # ⚡ FlashDiffusion: FlashPixart ⚡
        This is an interactive demo of [Flash Diffusion](https://huggingface.co/jasperai/flash-pixart), a diffusion distillation method proposed in [ADD ARXIV]() *by Clément Chadebec, Onur Tasar and Benjamin Aubin.*
        This model is a **66.5M**-parameter LoRA-distilled version of the [Pixart-α](https://huggingface.co/PixArt-alpha/PixArt-XL-2-1024-MS) model, able to generate 1024x1024 images in **4 steps**.
        Currently running on {power_device}.
        """)

        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)

        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

        gr.Examples(
            examples=examples,
            inputs=[prompt],
        )

    # Gradio click inputs must be components, so the fixed step count is passed
    # through the default value of `num_inference_steps` in `infer`.
    run_button.click(
        fn=infer,
        inputs=[prompt, seed, randomize_seed],
        outputs=[result],
    )

demo.queue().launch()