anzorq committed
Commit 327d86e (1 parent: 43f5d96)

Update app.py

Files changed (1): app.py (+49, −50)
app.py CHANGED
@@ -1,4 +1,4 @@
-from diffusers import AutoencoderKL, UNet2DConditionModel, StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
+from diffusers import AutoencoderKL, UNet2DConditionModel, StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
 import gradio as gr
 import torch
 from PIL import Image
@@ -15,35 +15,54 @@ class Model:
         self.pipe_i2i = None
 
 models = [
-    Model("Custom model", "", ""),
     Model("Arcane", "nitrosocke/Arcane-Diffusion", "arcane style "),
     Model("Archer", "nitrosocke/archer-diffusion", "archer style "),
     Model("Elden Ring", "nitrosocke/elden-ring-diffusion", "elden ring style "),
     Model("Spider-Verse", "nitrosocke/spider-verse-diffusion", "spiderverse style "),
     Model("Modern Disney", "nitrosocke/mo-di-diffusion", "modern disney style "),
     Model("Classic Disney", "nitrosocke/classic-anim-diffusion", "classic disney style "),
+    Model("Loving Vincent (Van Gogh)", "dallinmackay/Van-Gogh-diffusion", "lvngvncnt "),
+    Model("Redshift renderer (Cinema4D)", "nitrosocke/redshift-diffusion", "redshift style "),
+    Model("Midjourney v4 style", "prompthero/midjourney-v4-diffusion", "mdjrny-v4 style "),
     Model("Waifu", "hakurei/waifu-diffusion", ""),
     Model("Pokémon", "lambdalabs/sd-pokemon-diffusers", ""),
     Model("Pony Diffusion", "AstraliteHeart/pony-diffusion", ""),
     Model("Robo Diffusion", "nousr/robo-diffusion", ""),
     Model("Cyberpunk Anime", "DGSpitzer/Cyberpunk-Anime-Diffusion", "dgs illustration style "),
     Model("Tron Legacy", "dallinmackay/Tron-Legacy-diffusion", "trnlgcy ")
 ]
+
+scheduler = DPMSolverMultistepScheduler(
+    beta_start=0.00085,
+    beta_end=0.012,
+    beta_schedule="scaled_linear",
+    num_train_timesteps=1000,
+    trained_betas=None,
+    predict_epsilon=True,
+    thresholding=False,
+    algorithm_type="dpmsolver++",
+    solver_type="midpoint",
+    lower_order_final=True,
+)
+
+if is_colab:
+    models.insert(0, Model("Custom model", "", ""))
+custom_model = models[0]
 
 last_mode = "txt2img"
-current_model = models[1]
+current_model = models[1] if is_colab else models[0]
 current_model_path = current_model.path
 
 if is_colab:
-    pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16)
+    pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16, scheduler=scheduler)
 
 else: # download all models
     vae = AutoencoderKL.from_pretrained(current_model.path, subfolder="vae", torch_dtype=torch.float16)
     for model in models[1:]:
         try:
             unet = UNet2DConditionModel.from_pretrained(model.path, subfolder="unet", torch_dtype=torch.float16)
-            model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16)
-            model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16)
+            model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
+            model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
         except:
             models.remove(model)
     pipe = models[1].pipe_t2i
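
Note: DPM-Solver++ (the multistep scheduler wired in above) typically converges in roughly 20-25 steps instead of the ~50 the previous default scheduler needed, which is presumably why the Steps slider default drops from 50 to 25 further down in this diff. A minimal sketch of the same swap using the scheduler's from_config helper, which copies the beta settings from the checkpoint itself rather than hard-coding them as this hunk does (the model id and prompt below are illustrative, not part of this commit):

from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
import torch

# Load any SD checkpoint, then swap its scheduler for DPM-Solver++.
# from_config reuses the checkpoint's own beta_start/beta_end/etc.
pipe = StableDiffusionPipeline.from_pretrained(
    "nitrosocke/Arcane-Diffusion", torch_dtype=torch.float16).to("cuda")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

# DPM-Solver++ usually reaches good quality in about 25 steps.
image = pipe("arcane style portrait", num_inference_steps=25).images[0]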
@@ -58,7 +77,7 @@ def custom_model_changed(path):
     global current_model
     current_model = models[0]
 
-def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""):
+def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", inpaint_image=None):
 
     global current_model
     for model in models:
@@ -71,9 +90,9 @@ def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0
     if img is not None:
         return img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator)
     else:
-        return txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator)
+        return txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator, inpaint_image)
 
-def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator=None):
+def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator=None, inpaint_image=None):
 
     global last_mode
     global pipe
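
Note: Gradio binds the inputs list to handler arguments by position, so the new trailing inpaint_image parameter only receives a value because the final hunk of this diff appends the matching component to the inputs list.

# Positional binding in Gradio (names here are generic, for illustration):
#   button.click(fn, inputs=[c1, c2, c3])  ->  fn(v1, v2, v3)
# A new trailing parameter on inference() therefore needs a matching
# component appended to `inputs`, which the last hunk below does.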
@@ -81,8 +100,8 @@ def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, g
     if model_path != current_model_path or last_mode != "txt2img":
         current_model_path = model_path
 
-        if is_colab or current_model == models[0]:
-            pipe = StableDiffusionPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16)
+        if is_colab or current_model == custom_model:
+            pipe = StableDiffusionPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler)
         else:
             pipe.to("cpu")
             pipe = current_model.pipe_t2i
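
For context: in the non-colab branch every model's pipelines were pre-built at startup, so switching models here just parks the previously active pipeline on the CPU and promotes the cached one, trading host RAM for not re-downloading weights. A hypothetical helper distilling that pattern (switch_pipe does not exist in this file):

# Hypothetical sketch of the swap pattern above: evict the previous
# pipeline from VRAM, then promote the pipeline pre-built for the new model.
def switch_pipe(old_pipe, new_model, device="cuda"):
    old_pipe.to("cpu")                     # release GPU memory
    return new_model.pipe_t2i.to(device)   # cached at startup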
@@ -92,10 +111,17 @@ def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, g
     last_mode = "txt2img"
 
     prompt = current_model.prefix + prompt
+
+    if inpaint_image is not None:
+        init_image = inpaint_image["image"].convert("RGB").resize((width, height))
+        mask = inpaint_image["mask"].convert("RGB").resize((width, height))
+
     result = pipe(
         prompt,
         negative_prompt = neg_prompt,
         # num_images_per_prompt=n_images,
+        image = init_image,
+        mask_image = mask,
         num_inference_steps = int(steps),
         guidance_scale = guidance,
         width = width,
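
Note: as written, this hunk looks fragile in two ways: init_image and mask are only bound when inpaint_image is not None, yet image= and mask_image= are passed to pipe(...) unconditionally (a NameError on plain text-to-image calls), and StableDiffusionPipeline's text-to-image call does not accept a mask_image argument. In diffusers, inpainting normally goes through its own pipeline; a minimal sketch of that route (the checkpoint id and helper name are illustrative, not from this commit):

from diffusers import StableDiffusionInpaintPipeline
import torch

# Hypothetical: route the sketch-tool output through the dedicated
# inpainting pipeline instead of the txt2img pipeline.
inpaint_pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16).to("cuda")

def inpaint(inpaint_image, prompt, neg_prompt, guidance, steps, width, height, generator=None):
    # gr.Image(tool='sketch') hands the handler the base image plus the drawn mask.
    init_image = inpaint_image["image"].convert("RGB").resize((width, height))
    mask = inpaint_image["mask"].convert("RGB").resize((width, height))
    return inpaint_pipe(
        prompt,
        image=init_image,
        mask_image=mask,
        negative_prompt=neg_prompt,
        num_inference_steps=int(steps),
        guidance_scale=guidance,
        generator=generator,
    ).images[0]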
@@ -112,8 +138,8 @@ def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, w
     if model_path != current_model_path or last_mode != "img2img":
         current_model_path = model_path
 
-        if is_colab or current_model == models[0]:
-            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16)
+        if is_colab or current_model == custom_model:
+            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler)
         else:
             pipe.to("cpu")
             pipe = current_model.pipe_i2i
@@ -145,38 +171,7 @@ def replace_nsfw_images(results):
             results.images[i] = Image.open("nsfw.png")
     return results.images[0]
 
-css = """
-  <style>
-  .finetuned-diffusion-div {
-      text-align: center;
-      max-width: 700px;
-      margin: 0 auto;
-  }
-  .finetuned-diffusion-div div {
-      display: inline-flex;
-      align-items: center;
-      gap: 0.8rem;
-      font-size: 1.75rem;
-  }
-  .finetuned-diffusion-div div h1 {
-      font-weight: 900;
-      margin-bottom: 7px;
-  }
-  .finetuned-diffusion-div p {
-      margin-bottom: 10px;
-      font-size: 94%;
-  }
-  .finetuned-diffusion-div p a {
-      text-decoration: underline;
-  }
-  .tabs {
-      margin-top: 0px;
-      margin-bottom: 0px;
-  }
-  #gallery {
-      min-height: 20rem;
-  }
-  </style>
+css = """.finetuned-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.finetuned-diffusion-div div h1{font-weight:900;margin-bottom:7px}.finetuned-diffusion-div p{margin-bottom:10px;font-size:94%}.finetuned-diffusion-div p a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
 """
 with gr.Blocks(css=css) as demo:
     gr.HTML(
@@ -189,7 +184,7 @@ with gr.Blocks(css=css) as demo:
               Demo for multiple fine-tuned Stable Diffusion models, trained on different styles: <br>
               <a href="https://huggingface.co/nitrosocke/Arcane-Diffusion">Arcane</a>, <a href="https://huggingface.co/nitrosocke/archer-diffusion">Archer</a>, <a href="https://huggingface.co/nitrosocke/elden-ring-diffusion">Elden Ring</a>, <a href="https://huggingface.co/nitrosocke/spider-verse-diffusion">Spider-Verse</a>, <a href="https://huggingface.co/nitrosocke/modern-disney-diffusion">Modern Disney</a>, <a href="https://huggingface.co/nitrosocke/classic-anim-diffusion">Classic Disney</a>, <a href="https://huggingface.co/hakurei/waifu-diffusion">Waifu</a>, <a href="https://huggingface.co/lambdalabs/sd-pokemon-diffusers">Pokémon</a>, <a href="https://huggingface.co/AstraliteHeart/pony-diffusion">Pony Diffusion</a>, <a href="https://huggingface.co/nousr/robo-diffusion">Robo Diffusion</a>, <a href="https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion">Cyberpunk Anime</a>, <a href="https://huggingface.co/dallinmackay/Tron-Legacy-diffusion">Tron Legacy</a> + any other custom Diffusers 🧨 SD model hosted on HuggingFace 🤗.
             </p>
-            <p>Don't want to wait in queue? <a href="https://colab.research.google.com/gist/qunash/42112fb104509c24fd3aa6d1c11dd6e0/copy-of-fine-tuned-diffusion-gradio.ipynb"><img data-canonical-src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" src="https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667"></a></p>
+            <p>You can skip the queue and load custom models in the colab: <a href="https://colab.research.google.com/gist/qunash/42112fb104509c24fd3aa6d1c11dd6e0/copy-of-fine-tuned-diffusion-gradio.ipynb"><img data-canonical-src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" src="https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667"></a></p>
            Running on <b>{device}</b>{(" in a <b>Google Colab</b>." if is_colab else "")}
           </p>
         </div>
@@ -223,7 +218,7 @@ with gr.Blocks(css=css) as demo:
 
     with gr.Row():
         guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
-        steps = gr.Slider(label="Steps", value=50, minimum=2, maximum=100, step=1)
+        steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1)
 
     with gr.Row():
         width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
@@ -236,11 +231,15 @@ with gr.Blocks(css=css) as demo:
                 image = gr.Image(label="Image", height=256, tool="editor", type="pil")
                 strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
 
+            with gr.Tab("Inpainting"):
+                inpaint_image = gr.Image(source='upload', tool='sketch', type="pil", label="Upload").style(height=256)
+
     model_name.change(lambda x: gr.update(visible = x == models[0].name), inputs=model_name, outputs=custom_model_group)
-    custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=None)
+    if is_colab:
+        custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=None)
     # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery)
 
-    inputs = [model_name, prompt, guidance, steps, width, height, seed, image, strength, neg_prompt]
+    inputs = [model_name, prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, inpaint_image]
     prompt.submit(inference, inputs=inputs, outputs=image_out)
     generate.click(inference, inputs=inputs, outputs=image_out)
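
Note: with tool='sketch', the image component delivers a dict of two PIL images rather than a single image (this is what txt_to_img unpacks above), and that value is None until the user uploads a picture and paints a mask. A small hypothetical guard showing the shape (get_init_and_mask is not part of this commit):

# Gradio 3.x: gr.Image(source='upload', tool='sketch', type="pil") passes
# {"image": <PIL.Image>, "mask": <PIL.Image>} to the handler, or None.
def get_init_and_mask(inpaint_image, width=512, height=512):
    if inpaint_image is None:
        return None, None
    init = inpaint_image["image"].convert("RGB").resize((width, height))
    mask = inpaint_image["mask"].convert("RGB").resize((width, height))
    return init, mask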
246
 
 