from diffusers import StableDiffusionPipeline
from diffusers import StableDiffusionImg2ImgPipeline
import gradio as gr
import torch
from PIL import Image
import utils
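
# Detect whether the app is running inside Google Colab (affects example caching, queueing, and debug mode below)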
is_colab = utils.is_google_colab()
max_width = 832
max_height = 832
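
# Simple container describing a fine-tuned model: display name, Hugging Face Hub path, and prompt prefix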
class Model:
    def __init__(self, name, path, prefix):
        self.name = name
        self.path = path
        self.prefix = prefix
models = [
    Model("Arcane", "nitrosocke/Arcane-Diffusion", "arcane style "),
    Model("Archer", "nitrosocke/archer-diffusion", "archer style "),
    Model("Elden Ring", "nitrosocke/elden-ring-diffusion", "elden ring style "),
    Model("Spider-Verse", "nitrosocke/spider-verse-diffusion", "spiderverse style "),
    Model("Modern Disney", "nitrosocke/modern-disney-diffusion", "modern disney style "),
    Model("Classic Disney", "nitrosocke/classic-anim-diffusion", ""),
    Model("Waifu", "hakurei/waifu-diffusion", ""),
    Model("Pokémon", "lambdalabs/sd-pokemon-diffusers", ""),
    Model("Fuyuko Waifu", "yuk/fuyuko-waifu-diffusion", ""),
    Model("Pony Diffusion", "AstraliteHeart/pony-diffusion", ""),
    Model("Robo Diffusion", "nousr/robo-diffusion", ""),
    Model("Cyberpunk Anime", "DGSpitzer/Cyberpunk-Anime-Diffusion", "dgs illustration style "),
    Model("Hergé Style", "sd-dreambooth-library/herge-style", "herge_style "),
]
current_model = models[0]
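
# Load the default model's pipeline eagerly so the first request doesn't pay the full load cost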
pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16)
if torch.cuda.is_available():
    pipe = pipe.to("cuda")
device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""):
    # A seed of 0 means "random"; otherwise use a seeded generator for reproducible results.
    # Fall back to a CPU generator when CUDA is unavailable.
    generator_device = "cuda" if torch.cuda.is_available() else "cpu"
    generator = torch.Generator(generator_device).manual_seed(seed) if seed != 0 else None
    if img is not None:
        return img_to_img(model_name, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator)
    else:
        return txt_to_img(model_name, prompt, neg_prompt, guidance, steps, width, height, generator)
def txt_to_img(model_name, prompt, neg_prompt, guidance, steps, width, height, generator=None):
    global current_model
    global pipe
    # Swap in a new text-to-image pipeline only when the selected model has changed
    if model_name != current_model.name:
        for model in models:
            if model.name == model_name:
                current_model = model
        pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16)
        if torch.cuda.is_available():
            pipe = pipe.to("cuda")

    # Apply the model-specific style prefix to the prompt
    prompt = current_model.prefix + prompt
    results = pipe(
        prompt,
        negative_prompt=neg_prompt,
        num_inference_steps=int(steps),
        guidance_scale=guidance,
        width=width,
        height=height,
        generator=generator)

    # Show a placeholder image if the safety checker flagged the output
    image = results.images[0] if not results.nsfw_content_detected[0] else Image.open("nsfw.png")
    return image
def img_to_img(model_name, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
    global current_model
    global pipe
    # Swap in a new image-to-image pipeline only when the selected model has changed
    if model_name != current_model.name:
        for model in models:
            if model.name == model_name:
                current_model = model
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16)
        if torch.cuda.is_available():
            pipe = pipe.to("cuda")

    # Apply the model-specific style prefix and scale the input image down to the size limits
    prompt = current_model.prefix + prompt
    ratio = min(max_height / img.height, max_width / img.width)
    img = img.resize((int(img.width * ratio), int(img.height * ratio)))
    results = pipe(
        prompt,
        negative_prompt=neg_prompt,
        init_image=img,
        num_inference_steps=int(steps),
        strength=strength,
        guidance_scale=guidance,
        width=width,
        height=height,
        generator=generator)

    # Show a placeholder image if the safety checker flagged the output
    image = results.images[0] if not results.nsfw_content_detected[0] else Image.open("nsfw.png")
    return image
css = """
.finetuned-diffusion-div {
    text-align: center;
    max-width: 700px;
    margin: 0 auto;
}
.finetuned-diffusion-div div {
    display: inline-flex;
    align-items: center;
    gap: 0.8rem;
    font-size: 1.75rem;
}
.finetuned-diffusion-div div h1 {
    font-weight: 900;
    margin-bottom: 7px;
}
.finetuned-diffusion-div p {
    margin-bottom: 10px;
    font-size: 94%;
}
.finetuned-diffusion-div p a {
    text-decoration: underline;
}
"""
with gr.Blocks(css=css) as demo:
    gr.HTML(
        f"""
            <div class="finetuned-diffusion-div">
              <div>
                <h1>Finetuned Diffusion</h1>
              </div>
              <p>
                Demo for multiple fine-tuned Stable Diffusion models, trained on different styles: <br>
                <a href="https://huggingface.co/nitrosocke/Arcane-Diffusion">Arcane</a>, <a href="https://huggingface.co/nitrosocke/archer-diffusion">Archer</a>, <a href="https://huggingface.co/nitrosocke/elden-ring-diffusion">Elden Ring</a>, <a href="https://huggingface.co/nitrosocke/spider-verse-diffusion">Spiderverse</a>, <a href="https://huggingface.co/nitrosocke/modern-disney-diffusion">Modern Disney</a>, <a href="https://huggingface.co/nitrosocke/classic-anim-diffusion">Classic Disney</a>, <a href="https://huggingface.co/hakurei/waifu-diffusion">Waifu</a>, <a href="https://huggingface.co/lambdalabs/sd-pokemon-diffusers">Pokemon</a>, <a href="https://huggingface.co/yuk/fuyuko-waifu-diffusion">Fuyuko Waifu</a>, <a href="https://huggingface.co/AstraliteHeart/pony-diffusion">Pony</a>, <a href="https://huggingface.co/sd-dreambooth-library/herge-style">Hergé (Tintin)</a>, <a href="https://huggingface.co/nousr/robo-diffusion">Robo</a>, <a href="https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion">Cyberpunk Anime</a>
              </p>
              <p>
                Running on <b>{device}</b>
              </p>
            </div>
        """
    )
# gr.Markdown(f"Running on: {device}", elem_id="markdown_device")
with gr.Row():
with gr.Column():
model_name = gr.Dropdown(label="Model", choices=[m.name for m in models], value=models[0].name)
prompt = gr.Textbox(label="Prompt", placeholder="Style prefix is applied automatically")
run = gr.Button(value="Run")
with gr.Tab("Options"):
neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
steps = gr.Slider(label="Steps", value=50, maximum=100, minimum=2, step=1)
width = gr.Slider(label="Width", value=512, maximum=max_width, minimum=64, step=8)
height = gr.Slider(label="Height", value=512, maximum=max_height, minimum=64, step=8)
seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
with gr.Tab("Image to image"):
image = gr.Image(label="Image", height=256, tool="editor", type="pil")
strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
with gr.Column():
image_out = gr.Image(height=512)
inputs = [model_name, prompt, guidance, steps, width, height, seed, image, strength, neg_prompt]
prompt.submit(inference, inputs=inputs, outputs=image_out)
run.click(inference, inputs=inputs, outputs=image_out)
    # Example rows provide model, prompt, guidance, and steps; the remaining inference arguments use their defaults
    gr.Examples([
        [models[0].name, "jason bateman disassembling the demon core", 7.5, 50],
        [models[3].name, "portrait of dwayne johnson", 7.0, 75],
        [models[4].name, "portrait of a beautiful alyx vance half life", 10, 50],
        [models[5].name, "Aloy from Horizon: Zero Dawn, half body portrait, smooth, detailed armor, beautiful face, illustration", 7.0, 45],
        [models[4].name, "fantasy portrait painting, digital art", 4.0, 30],
    ], [model_name, prompt, guidance, steps], image_out, inference, cache_examples=not is_colab and torch.cuda.is_available())
    gr.Markdown('''
    Models by [@nitrosocke](https://huggingface.co/nitrosocke), [@Helixngc7293](https://twitter.com/DGSpitzer) and others. ❤️<br>
    Space by: [![Twitter Follow](https://img.shields.io/twitter/follow/hahahahohohe?label=%40anzorq&style=social)](https://twitter.com/hahahahohohe)

    ![visitors](https://visitor-badge.glitch.me/badge?page_id=anzorq.finetuned_diffusion)
    ''')
if not is_colab:
    demo.queue()

demo.launch(debug=is_colab)