fix

Files changed:
- app-txt2imglora.py  (+20 -20)
- static/txt2imglora.html  (+4 -30)

app-txt2imglora.py  CHANGED
@@ -14,7 +14,7 @@ from fastapi.responses import (
 )
 
 from diffusers import DiffusionPipeline, LCMScheduler, AutoencoderTiny
-from compel import Compel
+from compel import Compel
 import torch
 
 try:
@@ -35,11 +35,10 @@ MAX_QUEUE_SIZE = int(os.environ.get("MAX_QUEUE_SIZE", 0))
 TIMEOUT = float(os.environ.get("TIMEOUT", 0))
 SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None)
 TORCH_COMPILE = os.environ.get("TORCH_COMPILE", None)
+HF_TOKEN = os.environ.get("HF_TOKEN", None)
 
-WIDTH =
-HEIGHT =
-# disable tiny autoencoder for better quality speed tradeoff
-USE_TINY_AUTOENCODER = False
+WIDTH = 512
+HEIGHT = 512
 
 # check if MPS is available OSX only M1/M2/M3 chips
 mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
@@ -49,7 +48,7 @@ device = torch.device(
 )
 torch_device = device
 # change to torch.float16 to save GPU memory
-torch_dtype = torch.
+torch_dtype = torch.float
 
 print(f"TIMEOUT: {TIMEOUT}")
 print(f"SAFETY_CHECKER: {SAFETY_CHECKER}")
@@ -61,17 +60,15 @@ if mps_available:
     torch_device = "cpu"
     torch_dtype = torch.float32
 
-model_id = "
+model_id = "wavymulder/Analog-Diffusion"
+lcm_lora_id = "lcm-sd/lcm-sd1.5-lora"
 
 if SAFETY_CHECKER == "True":
     pipe = DiffusionPipeline.from_pretrained(model_id)
 else:
     pipe = DiffusionPipeline.from_pretrained(model_id, safety_checker=None)
-
-
-pipe.vae = AutoencoderTiny.from_pretrained(
-    "madebyollin/taesd", torch_dtype=torch_dtype, use_safetensors=True
-)
+
+
 pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
 pipe.set_progress_bar_config(disable=True)
 pipe.to(device=torch_device, dtype=torch_dtype).to(device)
@@ -86,15 +83,19 @@ if TORCH_COMPILE:
     pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
 
     pipe(prompt="warmup", num_inference_steps=1, guidance_scale=8.0)
-
+
 # Load LCM LoRA
-pipe.load_lora_weights(
+pipe.load_lora_weights(
+    lcm_lora_id,
+    weight_name="lcm_sd_lora.safetensors",
+    adapter_name="lcm",
+    use_auth_token=HF_TOKEN,
+)
 
 compel_proc = Compel(
-    tokenizer=
-    text_encoder=
-
-    requires_pooled=[False, True],
+    tokenizer=pipe.tokenizer,
+    text_encoder=pipe.text_encoder,
+    truncate_long_prompts=False,
 )
 user_queue_map = {}
 
@@ -112,10 +113,9 @@ class InputParams(BaseModel):
 
 def predict(params: InputParams):
     generator = torch.manual_seed(params.seed)
-    prompt_embeds
+    prompt_embeds = compel_proc(params.prompt)
     results = pipe(
         prompt_embeds=prompt_embeds,
-        pooled_prompt_embeds=pooled_prompt_embeds,
         generator=generator,
         num_inference_steps=params.steps,
         guidance_scale=params.guidance_scale,
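Taken together, the app-txt2imglora.py changes move the Space from its previous SDXL-style setup (pooled prompt embeds, tiny autoencoder, dual tokenizers) to a plain SD 1.5 path: wavymulder/Analog-Diffusion as the base model, an LCM LoRA loaded on top of it, and Compel producing prompt_embeds from a single tokenizer and text encoder. The sketch below strings those pieces together outside the Space. It is not the app itself: the model id, LoRA id and weight_name come from the diff, while the device, dtype, step count, guidance value and prompt are illustrative assumptions, and the HF_TOKEN plumbing (use_auth_token) in the diff is only needed if the LoRA repo is gated.

# Sketch: how the pieces added in app-txt2imglora.py fit together (standalone,
# not the Space's app). Ids and weight_name mirror the diff; device, dtype,
# step count, guidance value and the prompt are illustrative assumptions.
import torch
from diffusers import DiffusionPipeline, LCMScheduler
from compel import Compel

model_id = "wavymulder/Analog-Diffusion"
lcm_lora_id = "lcm-sd/lcm-sd1.5-lora"

pipe = DiffusionPipeline.from_pretrained(model_id, safety_checker=None)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights(
    lcm_lora_id,                            # a token (HF_TOKEN in the app) may be required if this repo is gated
    weight_name="lcm_sd_lora.safetensors",
    adapter_name="lcm",
)
pipe.to("cuda", torch.float16)

# SD 1.5 has a single tokenizer/text encoder, so Compel no longer needs the
# SDXL-style pooled embeddings that the diff removes from predict().
compel_proc = Compel(
    tokenizer=pipe.tokenizer,
    text_encoder=pipe.text_encoder,
    truncate_long_prompts=False,
)

prompt_embeds = compel_proc("analog style photograph of a lighthouse at dusk")
image = pipe(
    prompt_embeds=prompt_embeds,
    num_inference_steps=4,    # LCM LoRA targets very few steps
    guidance_scale=1.0,       # low CFG is typical with LCM; value is illustrative
    width=512,
    height=512,
    generator=torch.manual_seed(0),
).images[0]
image.save("analog.png")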
static/txt2imglora.html  CHANGED
@@ -74,8 +74,7 @@
 }
 
 async function promptUpdateStream(e) {
-    const
-    const [WIDTH, HEIGHT] = JSON.parse(dimension);
+    const [WIDTH, HEIGHT] = [512, 512];
     websocket.send(JSON.stringify({
         "seed": getValue("#seed"),
         "prompt": getValue("#prompt"),
@@ -210,14 +209,8 @@
 using
 <a href="https://github.com/huggingface/diffusers/tree/main/examples/community#latent-consistency-pipeline"
     target="_blank" class="text-blue-500 underline hover:no-underline">Diffusers</a> with a MJPEG
-    stream server.
-
-<p class="text-sm">
-    There are <span id="queue_size" class="font-bold">0</span> user(s) sharing the same GPU, affecting
-    real-time performance. Maximum queue size is 10. <a
-        href="https://huggingface.co/spaces/radames/Real-Time-Latent-Consistency-Model?duplicate=true"
-        target="_blank" class="text-blue-500 underline hover:no-underline">Duplicate</a> and run it on your
-    own GPU.
+    stream server. Featuring <a href="https://huggingface.co/wavymulder/Analog-Diffusion" target="_blank"
+        class="text-blue-500 underline hover:no-underline">Analog Diffusion</a> Model.
 </p>
 </article>
 <div>
@@ -230,7 +223,7 @@
 <div class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center">
     <textarea type="text" id="prompt" class="font-light w-full px-3 py-2 mx-1 outline-none dark:text-black"
         title=" Start your session and type your prompt here, you can see the result in real-time."
-        placeholder="Add your prompt here...">
+        placeholder="Add your prompt here...">Analog style photograph of young Harrison Ford as Han Solo, star wars behind the scenes</textarea>
 </div>
 
 </div>
@@ -238,25 +231,6 @@
 <details>
     <summary class="font-medium cursor-pointer">Advanced Options</summary>
     <form class="grid grid-cols-3 items-center gap-3 py-3" id="params" action="">
-        <label class="text-sm font-medium" for="dimension">Image Dimensions</label>
-        <div class="col-span-2 flex gap-2">
-            <div class="flex gap-1">
-                <input type="radio" id="dimension512" name="dimension" value="[512,512]"
-                    class="cursor-pointer">
-                <label for="dimension512" class="text-sm cursor-pointer">512x512</label>
-            </div>
-            <div class="flex gap-1">
-                <input type="radio" id="dimension768" name="dimension" value="[768,768]"
-                    lass="cursor-pointer">
-                <label for="dimension768" class="text-sm cursor-pointer">768x768</label>
-            </div>
-            <div class="flex gap-1">
-                <input type="radio" id="dimension1024" name="dimension" value="[1024,1024]" checked
-                    class="cursor-pointer">
-                <label for="dimension1024" class="text-sm cursor-pointer">1024x1024</label>
-            </div>
-        </div>
-        <!-- -->
         <label class="text-sm font-medium " for="steps">Inference Steps
         </label>
         <input type="range" id="steps" name="steps" min="1" max="20" value="4"
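On the client side, the dimension radio group is removed and promptUpdateStream now hard-codes [512, 512], matching the new WIDTH and HEIGHT constants in app-txt2imglora.py. For readers tracing the websocket payload into the server, here is a hypothetical sketch of the InputParams model those fields map onto: only seed, prompt, steps and guidance_scale are actually read in predict() in this diff, and the width/height defaults are assumptions mirroring the now-fixed 512x512.

# Hypothetical reconstruction of the server-side payload model (not shown in
# this diff). seed/prompt/steps/guidance_scale are the fields predict() reads;
# the width/height defaults are assumed to mirror WIDTH = HEIGHT = 512.
from pydantic import BaseModel

class InputParams(BaseModel):
    seed: int = 0
    prompt: str = ""
    steps: int = 4                # "Inference Steps" slider: min 1, max 20, default 4
    guidance_scale: float = 8.0   # illustrative; 8.0 is the value used in the warmup call
    width: int = 512              # assumption: fixed now that the dimension picker is gone
    height: int = 512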