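"""Gradio demo for text-to-image generation with the NoobAI-XL (SDXL) pipeline,
using Compel to turn prompts into embeddings."""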
import random
import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import AutoPipelineForText2Image, AutoencoderKL #,EulerDiscreteScheduler
from compel import Compel, ReturnedEmbeddingsType
DESCRIPTION = ""
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>You are currently running on CPU, but this demo only supports GPU.</p>"
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 4096
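# Load the fp16-fixed SDXL VAE and the NoobAI-XL pipeline onto the GPU when one is available.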
if torch.cuda.is_available():
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipe = AutoPipelineForText2Image.from_pretrained(
"John6666/noobai-xl-nai-xl-epsilonpred075version-sdxl",
vae=vae,
torch_dtype=torch.float16,
use_safetensors=True,
add_watermarker=False
)
#pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
pipe.to("cuda")
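# Return a fresh random seed when randomization is requested; otherwise keep the given seed.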
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
if randomize_seed:
seed = random.randint(0, MAX_SEED)
return seed
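# Generate one image; @spaces.GPU schedules this function on ZeroGPU hardware.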
@spaces.GPU
def infer(
prompt: str,
negative_prompt: str = "",
use_negative_prompt: bool = False,
seed: int = 1,
width: int = 512,
height: int = 768,
guidance_scale: float = 3,
num_inference_steps: int = 30,
randomize_seed: bool = False,
use_resolution_binning: bool = True,
progress=gr.Progress(track_tqdm=True),
):
seed = int(randomize_seed_fn(seed, randomize_seed))
generator = torch.Generator().manual_seed(seed)
    if not use_negative_prompt:
        negative_prompt = ""
    # SDXL has two tokenizers/text encoders, so Compel needs both, plus pooled embeddings.
    compel = Compel(
        tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
        text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
        returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
        requires_pooled=[False, True],
    )
    # Encode prompt and negative prompt together so Compel pads them to the same length.
    conditioning, pooled = compel([prompt, negative_prompt])
    image = pipe(
        prompt_embeds=conditioning[0:1],
        pooled_prompt_embeds=pooled[0:1],
        negative_prompt_embeds=conditioning[1:2],
        negative_pooled_prompt_embeds=pooled[1:2],
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        use_resolution_binning=use_resolution_binning,
    ).images[0]
return image, seed
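# Example prompts (kept for reference; not currently wired into the UI via gr.Examples).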
examples = [
"a cat eating a piece of cheese",
"a ROBOT riding a BLUE horse on Mars, photorealistic, 4k",
]
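# Minimal styling: narrow the page, center the title, and hide the Gradio footer.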
css = '''
.gradio-container{max-width: 560px !important}
h1{text-align:center}
footer {
visibility: hidden
}
'''
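# Build the Gradio UI: prompt box, result image, and an accordion with advanced options.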
with gr.Blocks(css=css) as demo:
    gr.Markdown("""# 梦羽's Image Generator
    ### Quickly generate images with the NoobXL model.""")
with gr.Group():
with gr.Row():
prompt = gr.Text(
label="关键词",
show_label=False,
max_lines=1,
placeholder="输入你要的图片关键词",
container=False,
)
            run_button = gr.Button("Generate", scale=0, variant="primary")
result = gr.Image(label="Result", show_label=False)
    with gr.Accordion("Advanced options", open=False):
with gr.Row():
            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
negative_prompt = gr.Text(
label="反向词条",
max_lines=5,
lines=4,
placeholder="输入你要排除的图片关键词",
value="lowres, {bad}, error, fewer, extra, missing, worst quality, jpeg artifacts, bad quality, watermark, unfinished, displeasing, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]",
visible=True,
)
seed = gr.Slider(
label="种子",
minimum=0,
maximum=MAX_SEED,
step=1,
value=0,
)
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
with gr.Row(visible=True):
width = gr.Slider(
label="宽度",
minimum=512,
maximum=MAX_IMAGE_SIZE,
step=64,
value=1024,
)
height = gr.Slider(
label="高度",
minimum=512,
maximum=MAX_IMAGE_SIZE,
step=64,
value=1536,
)
with gr.Row():
guidance_scale = gr.Slider(
label="Guidance Scale",
minimum=0.1,
maximum=10,
step=0.1,
value=7.0,
)
num_inference_steps = gr.Slider(
label="生成步数",
minimum=1,
maximum=50,
step=1,
value=28,
)
use_negative_prompt.change(
fn=lambda x: gr.update(visible=x),
inputs=use_negative_prompt,
outputs=negative_prompt,
)
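    # Run inference when the prompt is submitted or the Generate button is clicked.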
gr.on(
        triggers=[prompt.submit, run_button.click],
fn=infer,
inputs=[
prompt,
negative_prompt,
use_negative_prompt,
seed,
width,
height,
guidance_scale,
num_inference_steps,
randomize_seed,
],
outputs=[result, seed],
)
if __name__ == "__main__":
    demo.launch()