Spaces: Running on Zero
SunderAli17 committed: Update app.py
app.py CHANGED
@@ -2,28 +2,28 @@
 
 from __future__ import annotations
 
+import os
+import random
+
 import gradio as gr
 import numpy as np
+import PIL.Image
 import spaces
-import random
-from diffusers import AutoencoderKL, DiffusionPipeline
 import torch
+from diffusers import AutoencoderKL, DiffusionPipeline
 
-[9 removed lines not captured in this view]
-**Parts of codes are adopted from [@hysts's SD-XL demo](https://huggingface.co/spaces/hysts/SD-XL) running on A10G GPU **
-[removed line not captured in this view]
-You can check out more of my spaces. Demo by [Sunder Ali Khowaja](https://sander-ali.github.io) - [Github](https://github.com/sander-ali)
+DESCRIPTION = """
+# OpenDalle 1.1
+**Demo by [mrfakename](https://mrfake.name/) - [Twitter](https://twitter.com/realmrfakename) - [GitHub](https://github.com/fakerybakery/) - [Hugging Face](https://huggingface.co/mrfakename)**
+This is a demo of <a href="https://huggingface.co/dataautogpt3/OpenDalleV1.1">OpenDalle V1.1</a> by @dataautogpt3.
+It's a merge of several different models and is supposed to provide excellent performance. Try it out!
+[Not Working?](https://huggingface.co/spaces/mrfakename/OpenDalleV1.1-GPU-Demo/discussions/4)
+**The code for this demo is based on [@hysts's SD-XL demo](https://huggingface.co/spaces/hysts/SD-XL) running on a A10G GPU.**
+**NOTE: The model is licensed under a non-commercial license**
+Also see [OpenDalle Original Demo](https://huggingface.co/spaces/mrfakename/OpenDalle-GPU-Demo/)
 """
 if not torch.cuda.is_available():
-[removed line not captured in this view]
+    DESCRIPTION += "\n<h1>Running on CPU 🥶 This demo does not work on CPU. Please use <a href=\"https://huggingface.co/spaces/mrfakename/OpenDalleV1.1-GPU-Demo\">the online demo</a> instead</h1>"
 
 MAX_SEED = np.iinfo(np.int32).max
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "0") == "1"
@@ -32,14 +32,25 @@ USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
 ENABLE_REFINER = os.getenv("ENABLE_REFINER", "0") == "1"
 
-[removed line not captured in this view]
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 if torch.cuda.is_available():
     vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
-    pipe = DiffusionPipeline.from_pretrained(
-[removed line not captured in this view]
+    pipe = DiffusionPipeline.from_pretrained(
+        "dataautogpt3/OpenDalleV1.1",
+        vae=vae,
+        torch_dtype=torch.float16,
+        use_safetensors=True,
+        variant="fp16",
+    )
     if ENABLE_REFINER:
-        refiner = DiffusionPipeline.from_pretrained(
+        refiner = DiffusionPipeline.from_pretrained(
+            "stabilityai/stable-diffusion-xl-refiner-1.0",
+            vae=vae,
+            torch_dtype=torch.float16,
+            use_safetensors=True,
+            variant="fp16",
+        )
+
     if ENABLE_CPU_OFFLOAD:
         pipe.enable_model_cpu_offload()
         if ENABLE_REFINER:
@@ -48,19 +59,28 @@ if torch.cuda.is_available():
        pipe.to(device)
        if ENABLE_REFINER:
            refiner.to(device)
+
    if USE_TORCH_COMPILE:
-       pipe.unet = torch.compile(pipe.unet, mode=…
+       pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
        if ENABLE_REFINER:
-           refiner.unet = torch.compile(refiner.unet, mode="…
+           refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
+
 
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     return seed
 
-[removed line not captured in this view]
-[removed line not captured in this view]
+
+@spaces.GPU
+def generate(
     prompt: str,
+    negative_prompt: str = "",
+    prompt_2: str = "",
+    negative_prompt_2: str = "",
+    use_negative_prompt: bool = False,
+    use_prompt_2: bool = False,
+    use_negative_prompt_2: bool = False,
     seed: int = 0,
     width: int = 1024,
     height: int = 1024,
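Note: the CPU-offload and torch.compile branches above are gated entirely by environment variables read at import time (ENABLE_CPU_OFFLOAD, ENABLE_REFINER, USE_TORCH_COMPILE, plus CACHE_EXAMPLES earlier). A minimal, illustrative launcher showing how those switches might be set when running the Space locally; the launcher itself is not part of app.py, only the flag names are taken from the os.getenv calls in the file:

# launch_local.py -- illustrative only, not part of the commit
import os
import subprocess

env = dict(os.environ)
env["ENABLE_REFINER"] = "1"       # also load stabilityai/stable-diffusion-xl-refiner-1.0
env["ENABLE_CPU_OFFLOAD"] = "0"   # keep the pipelines resident on the GPU
env["USE_TORCH_COMPILE"] = "0"    # skip the torch.compile warm-up of the UNets
env["CACHE_EXAMPLES"] = "0"       # do not pre-render the example prompts
subprocess.run(["python", "app.py"], env=env, check=True)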
@@ -69,12 +89,6 @@ def infer(
     num_inference_steps_base: int = 25,
     num_inference_steps_refiner: int = 25,
     apply_refiner: bool = False,
-    negative_prompt: str = "",
-    prompt_2: str = "",
-    negative_prompt_2: str = "",
-    use_negative_prompt: bool = False,
-    use_prompt_2: bool = False,
-    use_negative_prompt_2: bool = False,
     progress=gr.Progress(track_tqdm=True),
 ) -> PIL.Image.Image:
     print(f"** Generating image for: \"{prompt}\" **")
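The body of the renamed function is untouched by this commit and is elided between this hunk and the next. For orientation only, a minimal sketch of the base-plus-optional-refiner handoff that such a body typically performs with diffusers; it assumes the module-level pipe, refiner, and device defined above, and every other name is illustrative rather than quoted from the file:

# Sketch of a base + optional refiner call; assumes `pipe`, `refiner`, `device`
# exist as set up earlier in app.py. Not the committed implementation.
import torch

def generate_sketch(prompt: str, seed: int = 0, width: int = 1024, height: int = 1024,
                    num_inference_steps_base: int = 25, num_inference_steps_refiner: int = 25,
                    apply_refiner: bool = False):
    generator = torch.Generator(device).manual_seed(seed)
    if apply_refiner:
        # Run the base model to latents, then let the SDXL refiner finish the image.
        latents = pipe(prompt=prompt, width=width, height=height,
                       num_inference_steps=num_inference_steps_base,
                       generator=generator, output_type="latent").images
        return refiner(prompt=prompt, image=latents,
                       num_inference_steps=num_inference_steps_refiner,
                       generator=generator).images[0]
    # Single-stage path: decode straight to a PIL image.
    return pipe(prompt=prompt, width=width, height=height,
                num_inference_steps=num_inference_steps_base,
                generator=generator, output_type="pil").images[0]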
@@ -125,85 +139,22 @@ def infer(
         ).images[0]
     return image
 
+
 examples = [
-    "…
+    "A realistic photograph of an astronaut in a jungle, cold color palette, detailed, 8k",
     "An astronaut riding a green horse",
-    "A delicious ceviche cheesecake slice",
 ]
 
-[2 removed lines not captured in this view]
-    margin: 0 auto;
-    max-width: 520px;
-}
-"""
-[removed line not captured in this view]
-# if torch.cuda.is_available():
-#     power_device = "GPU"
-# else:
-#     power_device = "CPU"
-theme = gr.themes.Glass(
-    primary_hue="blue",
-    secondary_hue="blue",
-    neutral_hue="gray",
-    text_size="md",
-    spacing_size="md",
-    radius_size="md",
-    font=[gr.themes.GoogleFont('Source Sans Pro'), 'ui-sans-serif', 'system-ui', 'sans-serif'],
-).set(
-    body_background_fill_dark='*background_fill_primary',
-    background_fill_primary_dark='*neutral_950',
-    background_fill_secondary='*neutral_50',
-    background_fill_secondary_dark='*neutral_900',
-    border_color_primary_dark='*neutral_700',
-    block_background_fill='*background_fill_primary',
-    block_background_fill_dark='*neutral_800',
-    block_border_width='1px',
-    block_label_background_fill='*background_fill_primary',
-    block_label_background_fill_dark='*background_fill_secondary',
-    block_label_text_color='*neutral_500',
-    block_label_text_size='*text_sm',
-    block_label_text_weight='400',
-    block_shadow='none',
-    block_shadow_dark='none',
-    block_title_text_color='*neutral_500',
-    block_title_text_weight='400',
-    panel_border_width='0',
-    panel_border_width_dark='0',
-    checkbox_background_color_dark='*neutral_800',
-    checkbox_border_width='*input_border_width',
-    checkbox_label_border_width='*input_border_width',
-    input_background_fill='*neutral_100',
-    input_background_fill_dark='*neutral_700',
-    input_border_color_focus_dark='*neutral_700',
-    input_border_width='0px',
-    input_border_width_dark='0px',
-    slider_color='#2563eb',
-    slider_color_dark='#2563eb',
-    table_even_background_fill_dark='*neutral_950',
-    table_odd_background_fill_dark='*neutral_900',
-    button_border_width='*input_border_width',
-    button_shadow_active='none',
-    button_primary_background_fill='*primary_200',
-    button_primary_background_fill_dark='*primary_700',
-    button_primary_background_fill_hover='*button_primary_background_fill',
-    button_primary_background_fill_hover_dark='*button_primary_background_fill',
-    button_secondary_background_fill='*neutral_200',
-    button_secondary_background_fill_dark='*neutral_600',
-    button_secondary_background_fill_hover='*button_secondary_background_fill',
-    button_secondary_background_fill_hover_dark='*button_secondary_background_fill',
-    button_cancel_background_fill='*button_secondary_background_fill',
-    button_cancel_background_fill_dark='*button_secondary_background_fill',
-    button_cancel_background_fill_hover='*button_cancel_background_fill',
-    button_cancel_background_fill_hover_dark='*button_cancel_background_fill'
+theme = gr.themes.Base(
+    font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif'],
 )
-[2 removed lines not captured in this view]
 with gr.Blocks(css="footer{display:none !important}", theme=theme) as demo:
-[removed line not captured in this view]
-    gr.…
-[removed line not captured in this view]
-[removed line not captured in this view]
+    gr.Markdown(DESCRIPTION)
+    gr.DuplicateButton(
+        value="Duplicate Space for private use",
+        elem_id="duplicate-button",
+        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
+    )
     with gr.Group():
         prompt = gr.Text(
             label="Prompt",
@@ -297,7 +248,7 @@ with gr.Blocks(css="footer{display:none !important}", theme=theme) as demo:
         examples=examples,
         inputs=prompt,
         outputs=result,
-        fn=…
+        fn=generate,
         cache_examples=CACHE_EXAMPLES,
     )
 
@@ -344,7 +295,7 @@ with gr.Blocks(css="footer{display:none !important}", theme=theme) as demo:
         queue=False,
         api_name=False,
     ).then(
-        fn=…
+        fn=generate,
         inputs=[
             prompt,
             negative_prompt,
@@ -365,5 +316,6 @@ with gr.Blocks(css="footer{display:none !important}", theme=theme) as demo:
         outputs=result,
         api_name="run",
     )
+
 if __name__ == "__main__":
-    demo.queue(max_size=20, api_open=False).launch(show_api=False…
+    demo.queue(max_size=20, api_open=False).launch(show_api=False)
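The last three hunks only repoint the UI callbacks at the renamed generate function; the surrounding event chain (randomize the seed, then run generation) is unchanged. A condensed, illustrative reconstruction of that wiring, with component names such as run_button assumed rather than quoted from the file:

# Illustrative Gradio wiring, not the committed code: re-seed, then generate.
run_button.click(
    fn=randomize_seed_fn,          # pick a fresh seed when "randomize seed" is checked
    inputs=[seed, randomize_seed],
    outputs=seed,
    queue=False,
    api_name=False,
).then(
    fn=generate,                   # the callback this commit points the UI at
    inputs=[prompt, negative_prompt, seed, width, height],
    outputs=result,
    api_name="run",
)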
|