import os
import gc
import gradio as gr
import numpy as np
import torch
import json
import spaces
import config
import utils
import logging
from PIL import Image, PngImagePlugin
from datetime import datetime
from diffusers.models import AutoencoderKL
from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

DESCRIPTION = "PonyDiffusion V6 XL"
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"

IS_COLAB = utils.is_google_colab() or os.getenv("IS_COLAB") == "1"
HF_TOKEN = os.getenv("HF_TOKEN")
CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
MIN_IMAGE_SIZE = int(os.getenv("MIN_IMAGE_SIZE", "512"))
MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "2048"))
USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
OUTPUT_DIR = os.getenv("OUTPUT_DIR", "./outputs")
THUMBNAIL_SIZE = (128, 128)  # Size for thumbnails

MODEL = os.getenv(
    "MODEL",
    "https://huggingface.co/AstraliteHeart/pony-diffusion-v6/blob/main/v6.safetensors",
)

torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Store the generation history
generation_history = []


def load_pipeline(model_name):
    # ... (rest of the function remains the same)


@spaces.GPU
def generate(
    prompt: str,
    negative_prompt: str = "",
    seed: int = 0,
    custom_width: int = 1024,
    custom_height: int = 1024,
    guidance_scale: float = 7.0,
    num_inference_steps: int = 30,
    sampler: str = "DPM++ 2M SDE Karras",
    aspect_ratio_selector: str = "1024 x 1024",
    use_upscaler: bool = False,
    upscaler_strength: float = 0.55,
    upscale_by: float = 1.5,
    progress=gr.Progress(track_tqdm=True),
) -> Image:
    # ... (rest of the function remains the same)
    try:
        # ... (existing code for image generation)

        if images:
            # Create thumbnail
            thumbnail = images[0].copy()
            thumbnail.thumbnail(THUMBNAIL_SIZE)

            # Add to generation history
            generation_history.append({
                "prompt": prompt,
                "thumbnail": thumbnail,
                "metadata": metadata
            })

        if IS_COLAB:
            for image in images:
                filepath = utils.save_image(image, metadata, OUTPUT_DIR)
                logger.info(f"Image saved as {filepath} with metadata")

        return images, metadata, update_history()
    except Exception as e:
        logger.exception(f"An error occurred: {e}")
        raise
    finally:
        if use_upscaler:
            del upscaler_pipe
        pipe.scheduler = backup_scheduler
        utils.free_memory()
" for item in reversed(generation_history[-10:]): # Show last 10 entries thumbnail_path = f"data:image/png;base64,{utils.image_to_base64(item['thumbnail'])}" history_html += f"""

{item['prompt'][:50]}...

""" history_html += "
" return history_html if torch.cuda.is_available(): pipe = load_pipeline(MODEL) logger.info("Loaded on Device!") else: pipe = None with gr.Blocks(css="style.css") as demo: title = gr.HTML( f"""

{DESCRIPTION}

""", elem_id="title", ) gr.Markdown( f"""Gradio demo for ([Pony Diffusion V6]https://civitai.com/models/257749/pony-diffusion-v6-xl/)""", elem_id="subtitle", ) gr.DuplicateButton( value="Duplicate Space for private use", elem_id="duplicate-button", visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1", ) with gr.Group(): with gr.Row(): prompt = gr.Text( label="Prompt", show_label=False, max_lines=5, placeholder="Enter your prompt", container=False, ) run_button = gr.Button( "Generate", variant="primary", scale=0 ) result = gr.Gallery( label="Result", columns=1, preview=True, show_label=False ) # Add the history display history_display = gr.HTML(label="Generation History") with gr.Accordion(label="Advanced Settings", open=False): # ... (rest of the UI components remain the same) with gr.Accordion(label="Generation Parameters", open=False): gr_metadata = gr.JSON(label="Metadata", show_label=False) gr.Examples( examples=config.examples, inputs=prompt, outputs=[result, gr_metadata, history_display], fn=lambda *args, **kwargs: generate(*args, use_upscaler=True, **kwargs), cache_examples=CACHE_EXAMPLES, ) # ... (rest of the event handlers remain the same) inputs = [ prompt, negative_prompt, seed, custom_width, custom_height, guidance_scale, num_inference_steps, sampler, aspect_ratio_selector, use_upscaler, upscaler_strength, upscale_by, ] prompt.submit( fn=utils.randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False, ).then( fn=generate, inputs=inputs, outputs=[result, gr_metadata, history_display], api_name="run", ) negative_prompt.submit( fn=utils.randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False, ).then( fn=generate, inputs=inputs, outputs=[result, gr_metadata, history_display], api_name=False, ) run_button.click( fn=utils.randomize_seed_fn, inputs=[seed, randomize_seed], outputs=seed, queue=False, api_name=False, ).then( fn=generate, inputs=inputs, outputs=[result, gr_metadata, history_display], api_name=False, ) demo.queue(max_size=20).launch(debug=IS_COLAB, share=IS_COLAB)