sfast
- app.py +13 -12
- requirements.txt +5 -2
app.py
CHANGED
@@ -12,6 +12,11 @@ import numpy as np
 import gradio as gr
 import psutil
 import time
+from sfast.compilers.stable_diffusion_pipeline_compiler import (
+    compile,
+    CompilationConfig,
+)
+
 
 SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None)
 TORCH_COMPILE = os.environ.get("TORCH_COMPILE", None)
@@ -40,22 +45,18 @@ else:
 pipe = DiffusionPipeline.from_pretrained("Lykon/dreamshaper-7", safety_checker=None)
 
 pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
-pipe.to(device=torch_device, dtype=torch_dtype).to(device)
 pipe.unet.to(memory_format=torch.channels_last)
 pipe.set_progress_bar_config(disable=True)
-
-# check if computer has less than 64GB of RAM using sys or os
-if psutil.virtual_memory().total < 64 * 1024**3:
-    pipe.enable_attention_slicing()
-
-if TORCH_COMPILE:
-    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
-    pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
-    pipe(prompt="warmup", num_inference_steps=1, guidance_scale=8.0)
-
-# Load LCM LoRA
 pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
 pipe.fuse_lora()
+pipe.to(device=torch_device, dtype=torch_dtype).to(device)
+
+
+config = CompilationConfig.Default()
+config.enable_xformers = True
+config.enable_triton = True
+config.enable_cuda_graph = True
+pipe = compile(pipe, config=config)
 
 
 def predict(prompt, guidance, steps, seed=1231231):
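For reference, the new code path can be read as one straight-through sketch (a reading aid, not part of the commit). It stitches together the context and added lines of the hunks above; torch_device, torch_dtype, and device are defined in earlier, unchanged parts of app.py, so the values used below are assumptions, as is the warmup call at the end.

import torch
from diffusers import DiffusionPipeline, LCMScheduler
from sfast.compilers.stable_diffusion_pipeline_compiler import (
    compile,
    CompilationConfig,
)

# Assumed defaults; in app.py these come from the unchanged setup code above the hunk.
torch_device = device = "cuda"
torch_dtype = torch.float16

pipe = DiffusionPipeline.from_pretrained("Lykon/dreamshaper-7", safety_checker=None)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.unet.to(memory_format=torch.channels_last)
pipe.set_progress_bar_config(disable=True)

# The LCM LoRA is loaded and fused before the pipeline is moved and compiled,
# so stable-fast traces the already-fused UNet.
pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
pipe.fuse_lora()
pipe.to(device=torch_device, dtype=torch_dtype).to(device)

# stable-fast compilation takes the place of the removed torch.compile branch.
config = CompilationConfig.Default()
config.enable_xformers = True    # needs the xformers package from requirements.txt
config.enable_triton = True      # needs triton
config.enable_cuda_graph = True  # CUDA graph capture for steady-state latency
pipe = compile(pipe, config=config)

# Assumed warmup (mirrors the removed warmup line): the first call is slow while
# stable-fast traces and compiles; subsequent calls run at full speed.
pipe(prompt="warmup", num_inference_steps=1, guidance_scale=8.0)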
requirements.txt
CHANGED
@@ -1,4 +1,5 @@
-diffusers==0.22.2
+# diffusers==0.22.2
+git+https://github.com/huggingface/diffusers.git@6110d7c95f630479cf01340cc8a8141c1e359f09
 transformers==4.34.1
 gradio==4.1.2
 --extra-index-url https://download.pytorch.org/whl/cu121
@@ -10,4 +11,6 @@ accelerate==0.24.0
 compel==2.0.2
 controlnet-aux==0.0.7
 peft==0.6.0
-
+stable_fast @ https://github.com/chengzeyi/stable-fast/releases/download/v0.0.15.post1/stable_fast-0.0.15.post1+torch211cu121-cp310-cp310-manylinux2014_x86_64.whl
+xformers
+triton