Files changed (1) hide show
  1. app.py +293 -328
app.py CHANGED
@@ -1,344 +1,309 @@
 
1
  import gradio as gr
2
- from datasets import load_dataset
3
- from PIL import Image
 
 
 
 
4
 
5
- import re
6
- import os
7
- import requests
8
 
9
- from share_btn import community_icon_html, loading_icon_html, share_js
 
 
 
 
 
 
10
 
11
- word_list_dataset = load_dataset("stabilityai/word-list", data_files="list.txt", use_auth_token=True)
12
- word_list = word_list_dataset["train"]['text']
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
- is_gpu_busy = False
15
- def infer(prompt, negative, scale):
16
- global is_gpu_busy
17
- for filter in word_list:
18
- if re.search(rf"\b{filter}\b", prompt):
19
- raise gr.Error("Unsafe content found. Please try again with different prompts.")
20
-
21
- images = []
22
- url = os.getenv('JAX_BACKEND_URL')
23
- payload = {'prompt': prompt, 'negative_prompt': negative, 'guidance_scale': scale}
24
- images_request = requests.post(url, json = payload)
25
- for image in images_request.json()["images"]:
26
- image_b64 = (f"data:image/jpeg;base64,{image}")
27
- images.append(image_b64)
28
-
29
- return images
 
 
 
 
 
 
 
30
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
- css = """
33
- .gradio-container {
34
- font-family: 'IBM Plex Sans', sans-serif;
35
- }
36
- .gr-button {
37
- color: white;
38
- border-color: black;
39
- background: black;
40
- }
41
- input[type='range'] {
42
- accent-color: black;
43
- }
44
- .dark input[type='range'] {
45
- accent-color: #dfdfdf;
46
- }
47
- .container {
48
- max-width: 730px;
49
- margin: auto;
50
- padding-top: 1.5rem;
51
- }
52
- #gallery {
53
- min-height: 22rem;
54
- margin-bottom: 15px;
55
- margin-left: auto;
56
- margin-right: auto;
57
- border-bottom-right-radius: .5rem !important;
58
- border-bottom-left-radius: .5rem !important;
59
- }
60
- #gallery>div>.h-full {
61
- min-height: 20rem;
62
- }
63
- .details:hover {
64
- text-decoration: underline;
65
- }
66
- .gr-button {
67
- white-space: nowrap;
68
- }
69
- .gr-button:focus {
70
- border-color: rgb(147 197 253 / var(--tw-border-opacity));
71
- outline: none;
72
- box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
73
- --tw-border-opacity: 1;
74
- --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
75
- --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
76
- --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
77
- --tw-ring-opacity: .5;
78
- }
79
- #advanced-btn {
80
- font-size: .7rem !important;
81
- line-height: 19px;
82
- margin-top: 12px;
83
- margin-bottom: 12px;
84
- padding: 2px 8px;
85
- border-radius: 14px !important;
86
- }
87
- #advanced-options {
88
- display: none;
89
- margin-bottom: 20px;
90
- }
91
- .footer {
92
- margin-bottom: 45px;
93
- margin-top: 35px;
94
- text-align: center;
95
- border-bottom: 1px solid #e5e5e5;
96
- }
97
- .footer>p {
98
- font-size: .8rem;
99
- display: inline-block;
100
- padding: 0 10px;
101
- transform: translateY(10px);
102
- background: white;
103
- }
104
- .dark .footer {
105
- border-color: #303030;
106
- }
107
- .dark .footer>p {
108
- background: #0b0f19;
109
- }
110
- .acknowledgments h4{
111
- margin: 1.25em 0 .25em 0;
112
- font-weight: bold;
113
- font-size: 115%;
114
- }
115
- .animate-spin {
116
- animation: spin 1s linear infinite;
117
- }
118
- @keyframes spin {
119
- from {
120
- transform: rotate(0deg);
121
- }
122
- to {
123
- transform: rotate(360deg);
124
- }
125
- }
126
- #share-btn-container {
127
- display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
128
- margin-top: 10px;
129
- margin-left: auto;
130
- }
131
- #share-btn {
132
- all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0;
133
- }
134
- #share-btn * {
135
- all: unset;
136
- }
137
- #share-btn-container div:nth-child(-n+2){
138
- width: auto !important;
139
- min-height: 0px !important;
140
- }
141
- #share-btn-container .wrap {
142
- display: none !important;
143
- }
144
 
145
- .gr-form{
146
- flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
147
- }
148
- #prompt-container{
149
- gap: 0;
150
- }
151
- #prompt-text-input, #negative-prompt-text-input{padding: .45rem 0.625rem}
152
- #component-16{border-top-width: 1px!important;margin-top: 1em}
153
- .image_duplication{position: absolute; width: 100px; left: 50px}
154
- """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
155
 
156
- block = gr.Blocks(css=css)
157
-
158
- examples = [
159
- [
160
- 'A high tech solarpunk utopia in the Amazon rainforest',
161
- 'low quality',
162
- 9
163
- ],
164
- [
165
- 'A pikachu fine dining with a view to the Eiffel Tower',
166
- 'low quality',
167
- 9
168
- ],
169
- [
170
- 'A mecha robot in a favela in expressionist style',
171
- 'low quality, 3d, photorealistic',
172
- 9
173
- ],
174
- [
175
- 'an insect robot preparing a delicious meal',
176
- 'low quality, illustration',
177
- 9
178
- ],
179
- [
180
- "A small cabin on top of a snowy mountain in the style of Disney, artstation",
181
- 'low quality, ugly',
182
- 9
183
- ],
184
- ]
185
-
186
-
187
- with block:
188
  gr.HTML(
189
- """
190
- <div style="text-align: center; margin: 0 auto;">
191
- <div
192
- style="
193
- display: inline-flex;
194
- align-items: center;
195
- gap: 0.8rem;
196
- font-size: 1.75rem;
197
- "
198
- >
199
- <svg
200
- width="0.65em"
201
- height="0.65em"
202
- viewBox="0 0 115 115"
203
- fill="none"
204
- xmlns="http://www.w3.org/2000/svg"
205
- >
206
- <rect width="23" height="23" fill="white"></rect>
207
- <rect y="69" width="23" height="23" fill="white"></rect>
208
- <rect x="23" width="23" height="23" fill="#AEAEAE"></rect>
209
- <rect x="23" y="69" width="23" height="23" fill="#AEAEAE"></rect>
210
- <rect x="46" width="23" height="23" fill="white"></rect>
211
- <rect x="46" y="69" width="23" height="23" fill="white"></rect>
212
- <rect x="69" width="23" height="23" fill="black"></rect>
213
- <rect x="69" y="69" width="23" height="23" fill="black"></rect>
214
- <rect x="92" width="23" height="23" fill="#D9D9D9"></rect>
215
- <rect x="92" y="69" width="23" height="23" fill="#AEAEAE"></rect>
216
- <rect x="115" y="46" width="23" height="23" fill="white"></rect>
217
- <rect x="115" y="115" width="23" height="23" fill="white"></rect>
218
- <rect x="115" y="69" width="23" height="23" fill="#D9D9D9"></rect>
219
- <rect x="92" y="46" width="23" height="23" fill="#AEAEAE"></rect>
220
- <rect x="92" y="115" width="23" height="23" fill="#AEAEAE"></rect>
221
- <rect x="92" y="69" width="23" height="23" fill="white"></rect>
222
- <rect x="69" y="46" width="23" height="23" fill="white"></rect>
223
- <rect x="69" y="115" width="23" height="23" fill="white"></rect>
224
- <rect x="69" y="69" width="23" height="23" fill="#D9D9D9"></rect>
225
- <rect x="46" y="46" width="23" height="23" fill="black"></rect>
226
- <rect x="46" y="115" width="23" height="23" fill="black"></rect>
227
- <rect x="46" y="69" width="23" height="23" fill="black"></rect>
228
- <rect x="23" y="46" width="23" height="23" fill="#D9D9D9"></rect>
229
- <rect x="23" y="115" width="23" height="23" fill="#AEAEAE"></rect>
230
- <rect x="23" y="69" width="23" height="23" fill="black"></rect>
231
- </svg>
232
- <h1 style="font-weight: 900; margin-bottom: 7px;margin-top:5px">
233
- Stable Diffusion 2.1 Demo
234
- </h1>
235
  </div>
236
- <p style="margin-bottom: 10px; font-size: 94%; line-height: 23px;">
237
- Stable Diffusion 2.1 is the latest text-to-image model from StabilityAI. <a style="text-decoration: underline;" href="https://huggingface.co/spaces/stabilityai/stable-diffusion-1">Access Stable Diffusion 1 Space here</a><br>For faster generation and API
238
- access you can try
239
- <a
240
- href="http://beta.dreamstudio.ai/"
241
- style="text-decoration: underline;"
242
- target="_blank"
243
- >DreamStudio Beta</a
244
- >.</a>
245
  </p>
 
 
246
  </div>
247
  """
248
  )
249
- with gr.Group():
250
- with gr.Box():
251
- with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
252
- with gr.Column():
253
- text = gr.Textbox(
254
- label="Enter your prompt",
255
- show_label=False,
256
- max_lines=1,
257
- placeholder="Enter your prompt",
258
- elem_id="prompt-text-input",
259
- ).style(
260
- border=(True, False, True, True),
261
- rounded=(True, False, False, True),
262
- container=False,
263
- )
264
- negative = gr.Textbox(
265
- label="Enter your negative prompt",
266
- show_label=False,
267
- max_lines=1,
268
- placeholder="Enter a negative prompt",
269
- elem_id="negative-prompt-text-input",
270
- ).style(
271
- border=(True, False, True, True),
272
- rounded=(True, False, False, True),
273
- container=False,
274
- )
275
- btn = gr.Button("Generate image").style(
276
- margin=False,
277
- rounded=(False, True, True, False),
278
- full_width=False,
279
- )
280
-
281
- gallery = gr.Gallery(
282
- label="Generated images", show_label=False, elem_id="gallery"
283
- ).style(grid=[2], height="auto")
284
-
285
- with gr.Group(elem_id="container-advanced-btns"):
286
- #advanced_button = gr.Button("Advanced options", elem_id="advanced-btn")
287
- with gr.Group(elem_id="share-btn-container"):
288
- community_icon = gr.HTML(community_icon_html)
289
- loading_icon = gr.HTML(loading_icon_html)
290
- share_button = gr.Button("Share to community", elem_id="share-btn")
291
-
292
- with gr.Accordion("Advanced settings", open=False):
293
- # gr.Markdown("Advanced settings are temporarily unavailable")
294
- # samples = gr.Slider(label="Images", minimum=1, maximum=4, value=4, step=1)
295
- # steps = gr.Slider(label="Steps", minimum=1, maximum=50, value=45, step=1)
296
- guidance_scale = gr.Slider(
297
- label="Guidance Scale", minimum=0, maximum=50, value=9, step=0.1
298
- )
299
- # seed = gr.Slider(
300
- # label="Seed",
301
- # minimum=0,
302
- # maximum=2147483647,
303
- # step=1,
304
- # randomize=True,
305
- # )
306
-
307
- ex = gr.Examples(examples=examples, fn=infer, inputs=[text, negative, guidance_scale], outputs=[gallery, community_icon, loading_icon, share_button], cache_examples=False)
308
- ex.dataset.headers = [""]
309
- negative.submit(infer, inputs=[text, negative, guidance_scale], outputs=[gallery], postprocess=False)
310
- text.submit(infer, inputs=[text, negative, guidance_scale], outputs=[gallery], postprocess=False)
311
- btn.click(infer, inputs=[text, negative, guidance_scale], outputs=[gallery], postprocess=False)
312
 
313
- #advanced_button.click(
314
- # None,
315
- # [],
316
- # text,
317
- # _js="""
318
- # () => {
319
- # const options = document.querySelector("body > gradio-app").querySelector("#advanced-options");
320
- # options.style.display = ["none", ""].includes(options.style.display) ? "flex" : "none";
321
- # }""",
322
- #)
323
- share_button.click(
324
- None,
325
- [],
326
- [],
327
- _js=share_js,
328
- )
329
- gr.HTML(
330
- """
331
- <div class="footer">
332
- <p>Model by <a href="https://huggingface.co/stabilityai" style="text-decoration: underline;" target="_blank">StabilityAI</a> - backend running JAX on TPUs due to generous support of <a href="https://sites.research.google/trc/about/" style="text-decoration: underline;" target="_blank">Google TRC program</a> - Gradio Demo by 🤗 Hugging Face
333
- </p>
334
- </div>
335
- <div class="acknowledgments">
336
- <p><h4>LICENSE</h4>
337
- The model is licensed with a <a href="https://huggingface.co/stabilityai/stable-diffusion-2/blob/main/LICENSE-MODEL" style="text-decoration: underline;" target="_blank">CreativeML OpenRAIL++</a> license. The authors claim no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in this license. The license forbids you from sharing any content that violates any laws, produce any harm to a person, disseminate any personal information that would be meant for harm, spread misinformation and target vulnerable groups. For the full list of restrictions please <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" target="_blank" style="text-decoration: underline;" target="_blank">read the license</a></p>
338
- <p><h4>Biases and content acknowledgment</h4>
339
- Despite how impressive being able to turn text into image is, beware to the fact that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence. The model was trained on the <a href="https://laion.ai/blog/laion-5b/" style="text-decoration: underline;" target="_blank">LAION-5B dataset</a>, which scraped non-curated image-text-pairs from the internet (the exception being the removal of illegal content) and is meant for research purposes. You can read more in the <a href="https://huggingface.co/CompVis/stable-diffusion-v1-4" style="text-decoration: underline;" target="_blank">model card</a></p>
340
- </div>
341
- """
342
- )
343
-
344
- block.queue(concurrency_count=80, max_size=100).launch(max_threads=150)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from diffusers import AutoencoderKL, UNet2DConditionModel, StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
2
  import gradio as gr
3
+ import torch
4
+ from PIL import Image
5
+ import utils
6
+ import datetime
7
+ import time
8
+ import psutil
9
 
10
start_time = time.time()  # wall-clock start; reported at the bottom as "Space built in N seconds"
is_colab = utils.is_google_colab()  # colab mode: custom-model slot enabled, no request queue, share=True on launch
 
12
 
13
class Model:
    """Lightweight record describing one selectable diffusion model.

    Attributes:
        name:   display name shown in the UI dropdown.
        path:   HuggingFace repo id (empty for the colab "Custom model" slot).
        prefix: style token(s) automatically prepended to every prompt.
        pipe_t2i / pipe_i2i: lazily-populated pipeline slots, start as None.

    NOTE: instances compare by identity (default ``__eq__``); the reload
    logic relies on ``current_model == custom_model`` meaning "same object",
    so no value-based equality is defined here.
    """

    def __init__(self, name, path="", prefix=""):
        self.name, self.path, self.prefix = name, path, prefix
        self.pipe_t2i = self.pipe_i2i = None
20
 
21
# Curated catalog of fine-tuned checkpoints shown in the dropdown.
# Model(name, HF repo id, prompt prefix) — the prefix is prepended to every
# prompt in txt_to_img/img_to_img; entries without a prefix use the default "".
models = [
    Model("Arcane", "nitrosocke/Arcane-Diffusion", "arcane style "),
    Model("Dreamlike Diffusion 1.0", "dreamlike-art/dreamlike-diffusion-1.0", "dreamlikeart "),
    Model("Archer", "nitrosocke/archer-diffusion", "archer style "),
    Model("Anything V3", "Linaqruf/anything-v3.0", ""),
    Model("Modern Disney", "nitrosocke/mo-di-diffusion", "modern disney style "),
    Model("Classic Disney", "nitrosocke/classic-anim-diffusion", "classic disney style "),
    Model("Loving Vincent (Van Gogh)", "dallinmackay/Van-Gogh-diffusion", "lvngvncnt "),
    Model("Wavyfusion", "wavymulder/wavyfusion", "wa-vy style "),
    Model("Analog Diffusion", "wavymulder/Analog-Diffusion", "analog style "),
    Model("Redshift renderer (Cinema4D)", "nitrosocke/redshift-diffusion", "redshift style "),
    Model("Midjourney v4 style", "prompthero/midjourney-v4-diffusion", "mdjrny-v4 style "),
    Model("Waifu", "hakurei/waifu-diffusion"),
    Model("Cyberpunk Anime", "DGSpitzer/Cyberpunk-Anime-Diffusion", "dgs illustration style "),
    Model("Elden Ring", "nitrosocke/elden-ring-diffusion", "elden ring style "),
    Model("TrinArt v2", "naclbit/trinart_stable_diffusion_v2"),
    Model("Spider-Verse", "nitrosocke/spider-verse-diffusion", "spiderverse style "),
    Model("Balloon Art", "Fictiverse/Stable_Diffusion_BalloonArt_Model", "BalloonArt "),
    Model("Tron Legacy", "dallinmackay/Tron-Legacy-diffusion", "trnlgcy "),
    Model("Pokémon", "lambdalabs/sd-pokemon-diffusers"),
    Model("Pony Diffusion", "AstraliteHeart/pony-diffusion"),
    Model("Robo Diffusion", "nousr/robo-diffusion"),
]

# On colab an extra "Custom model" slot is inserted at index 0 so any Diffusers
# checkpoint can be loaded by path (wired up via custom_model_changed below).
# NOTE(review): this insertion shifts every catalog index by one on colab.
custom_model = None
if is_colab:
    models.insert(0, Model("Custom model"))
    custom_model = models[0]

# Mutable generation state shared (as module globals) by txt_to_img/img_to_img.
last_mode = "txt2img"  # which pipeline type the global `pipe` currently holds
current_model = models[1] if is_colab else models[0]  # skip the custom slot on colab
current_model_path = current_model.path  # repo id the global `pipe` was loaded from
53
+
54
# Eagerly load the initial text-to-image pipeline in fp16.
# NOTE(review): fp16 weights are also used on CPU-only hosts — confirm intended.
if is_colab:
    # On colab the safety checker is replaced by a pass-through lambda: images
    # are returned unchanged and flagged as not-NSFW, i.e. filtering is disabled.
    pipe = StableDiffusionPipeline.from_pretrained(
        current_model.path,
        torch_dtype=torch.float16,
        scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
        safety_checker=lambda images, clip_input: (images, False)
    )
else:
    # Off colab the default safety checker is kept (replace_nsfw_images uses its flags).
    pipe = StableDiffusionPipeline.from_pretrained(
        current_model.path,
        torch_dtype=torch.float16,
        scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
    )

if torch.cuda.is_available():
    pipe = pipe.to("cuda")
    pipe.enable_xformers_memory_efficient_attention()

# Label interpolated into the header HTML.
device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
74
+
75
+ def error_str(error, title="Error"):
76
+ return f"""#### {title}
77
+ {error}""" if error else ""
78
+
79
def custom_model_changed(path):
    """Point the colab "Custom model" slot (models[0]) at *path* and select it.

    Called when the custom-model textbox changes; the next generation call
    will load the pipeline from this path.
    """
    global current_model
    models[0].path = path
    current_model = models[0]
83
+
84
def on_model_change(model_name):
    """React to a dropdown change: toggle the custom-model box and refresh
    the prompt placeholder with the selected model's automatic prefix.

    Returns two gr.update payloads: (custom-box visibility, prompt placeholder).
    """
    is_custom = model_name == models[0].name
    if is_custom:
        placeholder = "Don't forget to use the custom model prefix in the prompt!"
    else:
        # Look up the selected model's prefix; a missing name yields None and
        # (as in the original) would raise on concatenation.
        model_prefix = next((m.prefix for m in models if m.name == model_name), None)
        placeholder = "Enter prompt. \"" + model_prefix + "\" is prefixed automatically"
    return gr.update(visible=is_custom), gr.update(placeholder=placeholder)
89
+
90
def inference(model_name, prompt, guidance, steps, n_images=1, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""):
    """Dispatch one generation request from the UI.

    Selects the model by *model_name*, then runs img2img when *img* is given,
    otherwise txt2img.

    Returns:
        (images, None) on success, or (None, markdown_error_string) on failure,
        matching the [gallery, error_output] outputs wired in the UI.
    """
    print(psutil.virtual_memory())  # log memory usage per request

    global current_model
    for model in models:
        if model.name == model_name:
            current_model = model
            break  # fix: stop at the first match instead of scanning the rest
    model_path = current_model.path

    # Seed 0 means "random" (no explicit generator).
    # Fix: the original always built a CUDA generator, crashing on CPU-only
    # hosts whenever a non-zero seed was supplied.
    if seed == 0:
        generator = None
    else:
        gen_device = "cuda" if torch.cuda.is_available() else "cpu"
        generator = torch.Generator(gen_device).manual_seed(seed)

    try:
        if img is not None:
            return img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator), None
        return txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator), None
    except Exception as e:
        # Surface any pipeline/loading failure in the UI instead of crashing.
        return None, error_str(e)
109
+
110
def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator):
    """Run text-to-image generation, reloading the global pipeline when the
    selected model or the generation mode changed since the last call.

    Relies on module globals: `pipe` (the active pipeline), `last_mode`
    ("txt2img"/"img2img"), `current_model`, `current_model_path`, `is_colab`,
    `custom_model`. Returns the (possibly NSFW-filtered) list of PIL images.
    """

    print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}")

    global last_mode
    global pipe
    global current_model_path
    # Reload only when the model or the pipeline type actually changed.
    if model_path != current_model_path or last_mode != "txt2img":
        current_model_path = model_path

        if is_colab or current_model == custom_model:
            # Safety checker replaced with a pass-through — NSFW filter disabled.
            pipe = StableDiffusionPipeline.from_pretrained(
                current_model_path,
                torch_dtype=torch.float16,
                scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
                safety_checker=lambda images, clip_input: (images, False)
            )
        else:
            pipe = StableDiffusionPipeline.from_pretrained(
                current_model_path,
                torch_dtype=torch.float16,
                scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
            )
        # pipe = pipe.to("cpu")
        # pipe = current_model.pipe_t2i

        if torch.cuda.is_available():
            pipe = pipe.to("cuda")
            pipe.enable_xformers_memory_efficient_attention()
        last_mode = "txt2img"

    # The model's style token(s) are prepended automatically.
    prompt = current_model.prefix + prompt
    result = pipe(
        prompt,
        negative_prompt = neg_prompt,
        num_images_per_prompt=n_images,
        num_inference_steps = int(steps),  # sliders deliver floats; pipeline wants int
        guidance_scale = guidance,
        width = width,
        height = height,
        generator = generator)

    return replace_nsfw_images(result)
153
+
154
def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator):
    """Run image-to-image generation, reloading the global pipeline when the
    selected model or the generation mode changed since the last call.

    Mirrors txt_to_img but uses StableDiffusionImg2ImgPipeline and resizes the
    input image to fit within (width, height) preserving aspect ratio.
    Relies on the same module globals as txt_to_img.
    """

    print(f"{datetime.datetime.now()} img_to_img, model: {model_path}")

    global last_mode
    global pipe
    global current_model_path
    # Reload only when the model or the pipeline type actually changed.
    if model_path != current_model_path or last_mode != "img2img":
        current_model_path = model_path

        if is_colab or current_model == custom_model:
            # Safety checker replaced with a pass-through — NSFW filter disabled.
            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
                current_model_path,
                torch_dtype=torch.float16,
                scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
                safety_checker=lambda images, clip_input: (images, False)
            )
        else:
            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
                current_model_path,
                torch_dtype=torch.float16,
                scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
            )
        # pipe = pipe.to("cpu")
        # pipe = current_model.pipe_i2i

        if torch.cuda.is_available():
            pipe = pipe.to("cuda")
            pipe.enable_xformers_memory_efficient_attention()
        last_mode = "img2img"

    # The model's style token(s) are prepended automatically.
    prompt = current_model.prefix + prompt
    # Scale the input down/up so it fits inside the requested width x height.
    ratio = min(height / img.height, width / img.width)
    img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
    result = pipe(
        prompt,
        negative_prompt = neg_prompt,
        num_images_per_prompt=n_images,
        image = img,
        num_inference_steps = int(steps),  # sliders deliver floats; pipeline wants int
        strength = strength,
        guidance_scale = guidance,
        # width = width,   # img2img output size follows the input image
        # height = height,
        generator = generator)

    return replace_nsfw_images(result)
201
+
202
def replace_nsfw_images(results):
    """Return the result images, swapping any NSFW-flagged image for the
    local "nsfw.png" placeholder.

    On colab the safety checker is disabled at pipeline construction, so the
    images are returned unchanged (results carry no useful flags there).

    Args:
        results: a pipeline output exposing `.images` (list of PIL images)
            and `.nsfw_content_detected` (parallel list of bools).
    """
    if is_colab:
        return results.images

    # Fix: iterate the flag list directly with enumerate instead of the
    # range(len(...)) anti-idiom (also guards against flag/image length skew).
    for i, flagged in enumerate(results.nsfw_content_detected):
        if flagged:
            results.images[i] = Image.open("nsfw.png")
    return results.images
211
 
212
+ css = """.finetuned-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.finetuned-diffusion-div div h1{font-weight:900;margin-bottom:7px}.finetuned-diffusion-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
213
+ """
214
with gr.Blocks(css=css) as demo:
    # ---- Header: title, model credits, colab badge, device label ----
    gr.HTML(
        f"""
            <div class="finetuned-diffusion-div">
              <div>
                <h1>Finetuned Diffusion</h1>
              </div>
              <p>
               Demo for multiple fine-tuned Stable Diffusion models, trained on different styles: <br>
               <a href="https://huggingface.co/nitrosocke/Arcane-Diffusion">Arcane</a>, <a href="https://huggingface.co/nitrosocke/archer-diffusion">Archer</a>, <a href="https://huggingface.co/nitrosocke/elden-ring-diffusion">Elden Ring</a>, <a href="https://huggingface.co/nitrosocke/spider-verse-diffusion">Spider-Verse</a>, <a href="https://huggingface.co/nitrosocke/mo-di-diffusion">Modern Disney</a>, <a href="https://huggingface.co/nitrosocke/classic-anim-diffusion">Classic Disney</a>, <a href="https://huggingface.co/dallinmackay/Van-Gogh-diffusion">Loving Vincent (Van Gogh)</a>, <a href="https://huggingface.co/nitrosocke/redshift-diffusion">Redshift renderer (Cinema4D)</a>, <a href="https://huggingface.co/prompthero/midjourney-v4-diffusion">Midjourney v4 style</a>, <a href="https://huggingface.co/hakurei/waifu-diffusion">Waifu</a>, <a href="https://huggingface.co/lambdalabs/sd-pokemon-diffusers">Pokémon</a>, <a href="https://huggingface.co/AstraliteHeart/pony-diffusion">Pony Diffusion</a>, <a href="https://huggingface.co/nousr/robo-diffusion">Robo Diffusion</a>, <a href="https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion">Cyberpunk Anime</a>, <a href="https://huggingface.co/dallinmackay/Tron-Legacy-diffusion">Tron Legacy</a>, <a href="https://huggingface.co/Fictiverse/Stable_Diffusion_BalloonArt_Model">Balloon Art</a> + in colab notebook you can load any other Diffusers 🧨 SD model hosted on HuggingFace 🤗.
              </p>
              <p>You can skip the queue and load custom models in the colab: <a href="https://colab.research.google.com/gist/qunash/42112fb104509c24fd3aa6d1c11dd6e0/copy-of-fine-tuned-diffusion-gradio.ipynb"><img data-canonical-src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" src="https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667"></a></p>
              Running on <b>{device}</b>{(" in a <b>Google Colab</b>." if is_colab else "")}
              </p>
              <p>You can also duplicate this space and upgrade to gpu by going to settings:<br>
              <a style="display:inline-block" href="https://huggingface.co/spaces/anzorq/finetuned_diffusion?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></p>
            </div>
        """
    )
    with gr.Row():

        # ---- Left column: model selection, prompt, and results gallery ----
        with gr.Column(scale=55):
            with gr.Group():
                model_name = gr.Dropdown(label="Model", choices=[m.name for m in models], value=current_model.name)
                # Hidden unless the "Custom model" entry is selected (colab only;
                # toggled by on_model_change).
                with gr.Box(visible=False) as custom_model_group:
                    custom_model_path = gr.Textbox(label="Custom model path", placeholder="Path to model, e.g. nitrosocke/Arcane-Diffusion", interactive=True)
                    gr.HTML("<div><font size='2'>Custom models have to be downloaded first, so give it some time.</font></div>")

                with gr.Row():
                    prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder="Enter prompt. Style applied automatically").style(container=False)
                    generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))

                # image_out = gr.Image(height=512)
                gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto")

            # Markdown panel fed by error_str via inference's second output.
            error_output = gr.Markdown()

        # ---- Right column: generation options and the img2img tab ----
        with gr.Column(scale=45):
            with gr.Tab("Options"):
                with gr.Group():
                    neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")

                    n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=4, step=1)

                    with gr.Row():
                        guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
                        steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1)

                    with gr.Row():
                        width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
                        height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)

                    seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)

            with gr.Tab("Image to image"):
                with gr.Group():
                    # A non-empty image here switches inference to img2img.
                    image = gr.Image(label="Image", height=256, tool="editor", type="pil")
                    strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)

    # Custom-model wiring exists only on colab (the slot is only inserted there).
    if is_colab:
        model_name.change(on_model_change, inputs=model_name, outputs=[custom_model_group, prompt], queue=False)
        custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=None)
    # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery)

    inputs = [model_name, prompt, guidance, steps, n_images, width, height, seed, image, strength, neg_prompt]
    outputs = [gallery, error_output]
    prompt.submit(inference, inputs=inputs, outputs=outputs)
    generate.click(inference, inputs=inputs, outputs=outputs)

    # Clickable example prompts (not cached: inference runs on click).
    # NOTE(review): these catalog indices shift by one on colab because the
    # custom slot is inserted at models[0] — examples may name other models there.
    ex = gr.Examples([
        [models[7].name, "tiny cute and adorable kitten adventurer dressed in a warm overcoat with survival gear on a winters day", 7.5, 25],
        [models[4].name, "portrait of dwayne johnson", 7.0, 35],
        [models[5].name, "portrait of a beautiful alyx vance half life", 10, 25],
        [models[6].name, "Aloy from Horizon: Zero Dawn, half body portrait, smooth, detailed armor, beautiful face, illustration", 7.0, 30],
        [models[5].name, "fantasy portrait painting, digital art", 4.0, 20],
    ], inputs=[model_name, prompt, guidance, steps], outputs=outputs, fn=inference, cache_examples=False)

    # ---- Footer: credits and badges ----
    gr.HTML("""
    <div style="border-top: 1px solid #303030;">
      <br>
      <p>Models by <a href="https://huggingface.co/nitrosocke">@nitrosocke</a>, <a href="https://twitter.com/haruu1367">@haruu1367</a>, <a href="https://twitter.com/DGSpitzer">@Helixngc7293</a>, <a href="https://twitter.com/dal_mack">@dal_mack</a>, <a href="https://twitter.com/prompthero">@prompthero</a> and others. ❤️</p>
      <p>This space uses the <a href="https://github.com/LuChengTHU/dpm-solver">DPM-Solver++</a> sampler by <a href="https://arxiv.org/abs/2206.00927">Cheng Lu, et al.</a>.</p>
      <p>Space by:<br>
      <a href="https://twitter.com/hahahahohohe"><img src="https://img.shields.io/twitter/follow/hahahahohohe?label=%40anzorq&style=social" alt="Twitter Follow"></a><br>
      <a href="https://github.com/qunash"><img alt="GitHub followers" src="https://img.shields.io/github/followers/qunash?style=social" alt="Github Follow"></a></p><br><br>
      <a href="https://www.buymeacoffee.com/anzorq" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 45px !important;width: 162px !important;" ></a><br><br>
      <p><img src="https://visitor-badge.glitch.me/badge?page_id=anzorq.finetuned_diffusion" alt="visitors"></p>
    </div>
    """)
304
+
305
+ print(f"Space built in {time.time() - start_time:.2f} seconds")
306
+
307
+ if not is_colab:
308
+ demo.queue(concurrency_count=1)
309
+ demo.launch(debug=is_colab, share=is_colab)