import os
import random
import uuid
import json

import gradio as gr
import numpy as np
from PIL import Image
import spaces
import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

DESCRIPTION = """# 梦羽's Model Generator
### Quickly generate images with the NoobXL model."""
if not torch.cuda.is_available():
    DESCRIPTION += "\n\nYou are currently running on CPU, but only GPU is supported."

# Runtime configuration; most values can be overridden via environment variables.
MAX_SEED = np.iinfo(np.int32).max
CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "1") == "1"
MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if torch.cuda.is_available():
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "John6666/noobai-xl-nai-xl-epsilonpred075version-sdxl",
        torch_dtype=torch.float16,
        use_safetensors=True,
        add_watermarker=False,
    )
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    pipe.to("cuda")


def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed


@spaces.GPU(queue=False, duration=30)
def generate(
    prompt: str,
    negative_prompt: str = "",
    use_negative_prompt: bool = False,
    seed: int = 1,
    width: int = 512,
    height: int = 768,
    guidance_scale: float = 3,
    num_inference_steps: int = 30,
    randomize_seed: bool = False,
    use_resolution_binning: bool = True,
    progress=gr.Progress(track_tqdm=True),
):
    pipe.to(device)
    seed = int(randomize_seed_fn(seed, randomize_seed))
    generator = torch.Generator().manual_seed(seed)
    if not use_negative_prompt:
        # Drop the negative prompt when the checkbox is unchecked.
        negative_prompt = ""
    # Note: use_resolution_binning is kept in the signature for compatibility,
    # but StableDiffusionXLPipeline does not accept it, so it is not forwarded.
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        output_type="pil",
    ).images[0]
    # The Gallery output expects a list of images.
    return [image], seed


examples = [
    "a cat eating a piece of cheese",
    "a ROBOT riding a BLUE horse on Mars, photorealistic, 4k",
    "Ironman VS Hulk, ultrarealistic",
    "Astronaut in a jungle, cold color palette, oil pastel, detailed, 8k",
    "An alien holding sign board contain word 'Flash', futuristic, neonpunk",
    "Kids going to school, Anime style",
]

css = '''
.gradio-container{max-width: 560px !important}
h1{text-align:center}
footer {
    visibility: hidden
}
'''

with gr.Blocks(css=css) as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Group():
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter the keywords for the image you want",
                container=False,
            )
            run_button = gr.Button("Generate", scale=0)
        result = gr.Gallery(label="Result", columns=1)
    with gr.Accordion("Advanced options", open=False):
        with gr.Row():
            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=5,
                lines=4,
                placeholder="Enter keywords to exclude from the image",
                value="lowres, {bad}, error, fewer, extra, missing, worst quality, jpeg artifacts, bad quality, watermark, unfinished, displeasing, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
                visible=True,
            )
        seed = gr.Slider(
            label="Seed",
            minimum=0,
            maximum=MAX_SEED,
            step=1,
            value=0,
        )
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        with gr.Row(visible=True):
            width = gr.Slider(
                label="Width",
                minimum=512,
                maximum=MAX_IMAGE_SIZE,
                step=64,
                value=1024,
            )
            height = gr.Slider(
                label="Height",
                minimum=512,
                maximum=MAX_IMAGE_SIZE,
                step=64,
                value=1536,
            )
        with gr.Row():
            guidance_scale = gr.Slider(
                label="Guidance Scale",
                minimum=0.1,
                maximum=6,
                step=0.1,
                value=3.0,
            )
            num_inference_steps = gr.Slider(
                label="Inference steps",
                minimum=1,
                maximum=50,
                step=1,
                value=28,
            )

    gr.Examples(
        examples=examples,
        inputs=prompt,
        outputs=[result, seed],
        fn=generate,
        cache_examples=CACHE_EXAMPLES,
    )

    # Show or hide the negative-prompt box to match the checkbox state.
    use_negative_prompt.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
    )

    # Trigger generation from either textbox submit or the button click.
    gr.on(
        triggers=[
            prompt.submit,
            negative_prompt.submit,
            run_button.click,
        ],
        fn=generate,
        inputs=[
            prompt,
            negative_prompt,
            use_negative_prompt,
            seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
            randomize_seed,
        ],
        outputs=[result, seed],
    )

if __name__ == "__main__":
    demo.queue(max_size=20).launch()
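
# Local usage sketch (assumption: running outside HF Spaces, where app.py is
# launched automatically). A CUDA GPU is required; on CPU the UI loads but
# `pipe` is never created, so generation fails by design.
#   pip install torch diffusers transformers accelerate safetensors gradio spaces
#   python app.py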