Update app.py
app.py CHANGED
@@ -66,10 +66,21 @@ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 model_id = "stabilityai/stable-diffusion-xl-base-1.0"
 
 pipe = DiffusionPipeline.from_pretrained(model_id, variant="fp16")
-
-
+
+# Create LCMScheduler with default config
+lcm_scheduler = LCMScheduler.from_config(pipe.scheduler.config)
+
+# Remove the 'skip_prk_steps' if it exists in the config
+if hasattr(lcm_scheduler.config, 'skip_prk_steps'):
+    delattr(lcm_scheduler.config, 'skip_prk_steps')
+
+pipe.scheduler = lcm_scheduler
 pipe.to(device=DEVICE, dtype=torch.float16)
 
+# Load Flash SDXL LoRA
+flash_sdxl_id = "jasperai/flash-sdxl"
+pipe.load_lora_weights(flash_sdxl_id, adapter_name="flash_lora")
+
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
@@ -92,17 +103,11 @@ def infer(
     progress=gr.Progress(track_tqdm=True),
 ):
     try:
-
-
+        # Load the user-selected LoRA
         new_adapter_id = user_lora_selector.replace("/", "_")
-
-
-        if "flash_lora" not in loaded_adapters["unet"] or new_adapter_id not in loaded_adapters["unet"]:
-            gr.Info("Loading LoRAs")
-            pipe.unload_lora_weights()
-            pipe.load_lora_weights(flash_sdxl_id, adapter_name="flash_lora")
-            pipe.load_lora_weights(user_lora_selector, adapter_name=new_adapter_id)
+        pipe.load_lora_weights(user_lora_selector, adapter_name=new_adapter_id)
 
+        # Set adapter weights
        pipe.set_adapters(["flash_lora", new_adapter_id], adapter_weights=[1.0, user_lora_weight])
        gr.Info("LoRA setup complete")
 
@@ -114,11 +119,12 @@ def infer(
         if pre_prompt != "":
             prompt = f"{pre_prompt} {prompt}"
 
+        # Use Flash Diffusion settings
         image = pipe(
             prompt=prompt,
             negative_prompt=negative_prompt,
-            guidance_scale=guidance_scale,
-            num_inference_steps=num_inference_steps,
+            guidance_scale=1.0,  # Flash Diffusion typically uses guidance_scale=1
+            num_inference_steps=4,  # Flash Diffusion uses fewer steps
             generator=generator,
         ).images[0]
 
@@ -127,6 +133,165 @@ def infer(
         gr.Error(f"An error occurred: {str(e)}")
         return None
 
-
+css = """
+h1 {
+    text-align: center;
+    display:block;
+}
+p {
+    text-align: justify;
+    display:block;
+}
+"""
+
+with gr.Blocks(css=css) as demo:
+    gr.Markdown(
+        f"""
+# ⚡ FlashDiffusion: FlashLoRA ⚡
+This is an interactive demo of [Flash Diffusion](https://gojasper.github.io/flash-diffusion-project/) **on top of** existing LoRAs.
+
+The distillation method proposed in [Flash Diffusion: Accelerating Any Conditional Diffusion Model for Few Steps Image Generation](http://arxiv.org/abs/2406.02347) *by Clément Chadebec, Onur Tasar, Eyal Benaroche and Benjamin Aubin* from Jasper Research.
+The LoRAs can be added **without** any retraining for similar results in most cases. Feel free to tweak the parameters and use your own LoRAs by giving a look at the [Github Repo](https://github.com/gojasper/flash-diffusion)
+        """
+    )
+    gr.Markdown(
+        "If you enjoy the space, please also promote *open-source* by giving a ⭐ to our repo [![GitHub Stars](https://img.shields.io/github/stars/gojasper/flash-diffusion?style=social)](https://github.com/gojasper/flash-diffusion)"
+    )
+
+    gr_sdxl_loras = gr.State(value=sdxl_loras_raw)
+    gr_lora_id = gr.State(value="")
+
+    with gr.Row():
+        with gr.Blocks():
+            with gr.Column():
+                user_lora_selector = gr.Textbox(
+                    label="Current Selected LoRA",
+                    max_lines=1,
+                    interactive=False,
+                )
+
+                user_lora_weight = gr.Slider(
+                    label="Selected LoRA Weight",
+                    minimum=0.5,
+                    maximum=3,
+                    step=0.1,
+                    value=1,
+                )
+
+                gallery = gr.Gallery(
+                    value=[(item["image"], item["title"]) for item in sdxl_loras_raw],
+                    label="SDXL LoRA Gallery",
+                    allow_preview=False,
+                    columns=3,
+                    elem_id="gallery",
+                    show_share_button=False,
+                )
+
+        with gr.Column():
+            with gr.Row():
+                prompt = gr.Text(
+                    label="Prompt",
+                    show_label=False,
+                    max_lines=1,
+                    placeholder="Enter your prompt",
+                    container=False,
+                    scale=5,
+                )
+
+                run_button = gr.Button("Run", scale=1)
+
+            result = gr.Image(label="Result", show_label=False)
+
+            with gr.Accordion("Advanced Settings", open=False):
+                pre_prompt = gr.Text(
+                    label="Pre-Prompt",
+                    show_label=True,
+                    max_lines=1,
+                    placeholder="Pre Prompt from the LoRA config",
+                    container=True,
+                    scale=5,
+                )
+
+                seed = gr.Slider(
+                    label="Seed",
+                    minimum=0,
+                    maximum=MAX_SEED,
+                    step=1,
+                    value=0,
+                )
+
+                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+                with gr.Row():
+                    num_inference_steps = gr.Slider(
+                        label="Number of inference steps",
+                        minimum=4,
+                        maximum=8,
+                        step=1,
+                        value=4,
+                    )
+
+                with gr.Row():
+                    guidance_scale = gr.Slider(
+                        label="Guidance Scale",
+                        minimum=1,
+                        maximum=6,
+                        step=0.5,
+                        value=1,
+                    )
+
+                hint_negative = gr.Markdown(
+                    """💡 _Hint : Negative Prompt will only work with Guidance > 1 but the model was
+                    trained to be used with guidance = 1 (ie. without guidance).
+                    Can degrade the results, use cautiously._"""
+                )
+
+                negative_prompt = gr.Text(
+                    label="Negative Prompt",
+                    show_label=False,
+                    max_lines=1,
+                    placeholder="Enter a negative Prompt",
+                    container=False,
+                )
+
+    gr.on(
+        [
+            run_button.click,
+            seed.change,
+            randomize_seed.change,
+            prompt.submit,
+            negative_prompt.change,
+            negative_prompt.submit,
+            guidance_scale.change,
+        ],
+        fn=infer,
+        inputs=[
+            pre_prompt,
+            prompt,
+            seed,
+            randomize_seed,
+            num_inference_steps,
+            negative_prompt,
+            guidance_scale,
+            user_lora_selector,
+            user_lora_weight,
+        ],
+        outputs=[result],
+    )
+
+    gallery.select(
+        fn=update_selection,
+        inputs=[gr_sdxl_loras],
+        outputs=[
+            user_lora_selector,
+            pre_prompt,
+        ],
+        show_progress="hidden",
+    )
+
+    gr.Markdown("**Disclaimer:**")
+    gr.Markdown(
+        "This demo is only for research purpose. Users are solely responsible for any content they create, and it is their obligation to ensure that it adheres to appropriate and ethical standards."
+    )
 
 demo.queue().launch()
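
Taken together, the new code path amounts to: swap the SDXL scheduler for an LCMScheduler, keep the jasperai/flash-sdxl distillation LoRA loaded as a permanent "flash_lora" adapter, fuse the user-selected LoRA on top of it with set_adapters, and sample with few steps and guidance_scale=1. The following is a minimal standalone sketch of that setup outside the Gradio app, assuming diffusers with PEFT support and a CUDA GPU; the user LoRA repo id, prompt, and output filename are placeholders, not values shipped with this Space.

import torch
from diffusers import DiffusionPipeline, LCMScheduler

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# SDXL base with the LCM-style scheduler this commit switches to
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", variant="fp16"
)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.to(device=DEVICE, dtype=torch.float16)

# Flash Diffusion distillation LoRA, loaded once and kept as a named adapter
pipe.load_lora_weights("jasperai/flash-sdxl", adapter_name="flash_lora")

# Fuse a user-chosen style LoRA on top of the flash adapter (placeholder repo id)
user_lora = "nerijs/pixel-art-xl"
user_adapter = user_lora.replace("/", "_")
pipe.load_lora_weights(user_lora, adapter_name=user_adapter)
pipe.set_adapters(["flash_lora", user_adapter], adapter_weights=[1.0, 1.0])

# Flash Diffusion samples in a few steps without classifier-free guidance
image = pipe(
    prompt="pixel art, a cozy cabin in a snowy forest",
    num_inference_steps=4,
    guidance_scale=1.0,
    generator=torch.Generator(device=DEVICE).manual_seed(0),
).images[0]
image.save("flash_lora_sample.png")

In the Space itself, the user LoRA repo id and its adapter weight are not hard-coded as above; they come from the gallery selection (user_lora_selector) and the user_lora_weight slider passed into infer.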