import random
import uuid

import gradio as gr
import spaces
import torch
from diffusers import DiffusionPipeline

device = torch.device("cuda:0")

# Load the Playground v2.5 text-to-image pipeline in half precision.
diffusion_pipe = DiffusionPipeline.from_pretrained(
    "playgroundai/playground-v2.5-1024px-aesthetic",
    torch_dtype=torch.float16,
    use_safetensors=True,
    add_watermarker=False,
    variant="fp16",
).to(device)


def save_image(img):
    """Save a PIL image under a unique filename and return its path."""
    unique_name = str(uuid.uuid4()) + ".png"
    img.save(unique_name)
    return unique_name


@spaces.GPU(enable_queue=True)
def generate(
    prompt: str,
    progress=gr.Progress(track_tqdm=True),
):
    # Draw a fresh random seed for every request so repeated prompts vary.
    seed = random.randint(0, 2147483647)
    generator = torch.Generator().manual_seed(seed)

    images = diffusion_pipe(
        prompt=[prompt],
        negative_prompt=None,
        width=1024,
        height=1024,
        guidance_scale=3,
        num_inference_steps=25,
        generator=generator,
        num_images_per_prompt=1,
        use_resolution_binning=True,
        output_type="pil",
    ).images

    # Persist each generated image and return the file paths for the gallery.
    image_paths = [save_image(img) for img in images]
    return image_paths


css = '''
.gradio-container{max-width: 560px !important}
h1{text-align:center}
'''

with gr.Blocks(css=css) as demo:
    gr.Markdown("# Playground v2.5")
    with gr.Group():
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)
        result = gr.Gallery(label="Result", columns=2, rows=1, show_label=False)

    # Run generation on either pressing Enter in the prompt box or clicking Run.
    gr.on(
        triggers=[
            prompt.submit,
            run_button.click,
        ],
        fn=generate,
        inputs=[
            prompt,
        ],
        outputs=[result],
        api_name="run",
    )

if __name__ == "__main__":
    demo.queue(max_size=20).launch()
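
# --- Usage sketch (illustrative, not part of the app) ---
# Because the event listener above registers api_name="run", a running instance
# of this demo can also be queried programmatically with the gradio_client
# package. This is a minimal sketch assuming the app is served locally on the
# default port 7860; the URL and the example prompt are assumptions:
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860/")
#   result = client.predict("a serene mountain lake at sunrise", api_name="/run")
#   print(result)  # gallery output: path(s) to the generated PNG file(s)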