import os
import random
import uuid

import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import DiffusionPipeline

MAX_SEED = np.iinfo(np.int32).max
CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "1") == "1"
MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1536"))

device = torch.device("cuda:0")

# Load the Playground v2.5 aesthetic checkpoint in fp16 and move it to the GPU.
pipe = DiffusionPipeline.from_pretrained(
    "playgroundai/playground-v2.5-1024px-aesthetic",
    torch_dtype=torch.float16,
    use_safetensors=True,
    add_watermarker=False,
    variant="fp16",
)
pipe.to(device)
print("Loaded on Device!")


def save_image(img):
    # Save the generated PIL image under a unique filename and return its path.
    unique_name = str(uuid.uuid4()) + ".png"
    img.save(unique_name)
    return unique_name


@spaces.GPU(enable_queue=True)
def generate(
    prompt: str,
    progress=gr.Progress(track_tqdm=True),
):
    # Pick a fresh random seed per call so repeated prompts give different images.
    seed = random.randint(0, MAX_SEED)
    pipe.to(device)
    generator = torch.Generator().manual_seed(seed)

    images = pipe(
        prompt=prompt,
        negative_prompt=None,
        width=1024,
        height=1024,
        guidance_scale=3,
        num_inference_steps=25,
        generator=generator,
        num_images_per_prompt=1,
        use_resolution_binning=True,
        output_type="pil",
    ).images

    image_paths = [save_image(img) for img in images]
    return image_paths


with gr.Blocks() as demo:
    gr.Markdown("# Blossom Playground v2.5")
    with gr.Group():
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)
        result = gr.Gallery(label="Result", columns=1, show_label=False)

    # Trigger generation on prompt submit or Run button click.
    gr.on(
        triggers=[
            prompt.submit,
            run_button.click,
        ],
        fn=generate,
        inputs=[
            prompt,
        ],
        outputs=[result],
        api_name="run",
    )

if __name__ == "__main__":
    demo.queue(max_size=20).launch()