Spaces:
Runtime error
Runtime error
Jim Eric Skogman
committed on
Commit
•
c90d6f6
1
Parent(s):
d41f3b7
Switch to SDXL and add PEFT
Browse files
- inference.py +8 -4
- requirements.txt +1 -0
inference.py
CHANGED
@@ -43,8 +43,8 @@ class timer:
|
|
43 |
print(f"{self.method} took {str(round(end - self.start, 2))}s")
|
44 |
|
45 |
|
46 |
-
def load_models(model_id="
|
47 |
-
from diffusers import AutoPipelineForImage2Image, LCMScheduler
|
48 |
from diffusers.utils import load_image
|
49 |
|
50 |
if not is_mac:
|
@@ -52,11 +52,14 @@ def load_models(model_id="Lykon/dreamshaper-7"):
|
|
52 |
|
53 |
use_fp16 = should_use_fp16()
|
54 |
|
55 |
-
|
|
|
|
|
56 |
|
57 |
if use_fp16:
|
58 |
pipe = AutoPipelineForImage2Image.from_pretrained(
|
59 |
model_id,
|
|
|
60 |
cache_dir=cache_path,
|
61 |
torch_dtype=torch.float16,
|
62 |
variant="fp16",
|
@@ -65,13 +68,14 @@ def load_models(model_id="Lykon/dreamshaper-7"):
|
|
65 |
else:
|
66 |
pipe = AutoPipelineForImage2Image.from_pretrained(
|
67 |
model_id,
|
|
|
68 |
cache_dir=cache_path,
|
69 |
safety_checker=None
|
70 |
)
|
71 |
|
72 |
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
|
73 |
|
74 |
-
pipe.load_lora_weights(
|
75 |
pipe.fuse_lora()
|
76 |
|
77 |
device = "mps" if is_mac else "cuda"
|
|
|
43 |
print(f"{self.method} took {str(round(end - self.start, 2))}s")
|
44 |
|
45 |
|
46 |
+
def load_models(model_id="stabilityai/stable-diffusion-xl-base-1.0"):
|
47 |
+
from diffusers import UNet2DConditionModel, AutoPipelineForImage2Image, LCMScheduler
|
48 |
from diffusers.utils import load_image
|
49 |
|
50 |
if not is_mac:
|
|
|
52 |
|
53 |
use_fp16 = should_use_fp16()
|
54 |
|
55 |
+
lora_id = "artificialguybr/LogoRedmond-LogoLoraForSDXL-V2"
|
56 |
+
|
57 |
+
unet = UNet2DConditionModel.from_pretrained("latent-consistency/lcm-sdxl", torch_dtype=torch.float16, variant="fp16")
|
58 |
|
59 |
if use_fp16:
|
60 |
pipe = AutoPipelineForImage2Image.from_pretrained(
|
61 |
model_id,
|
62 |
+
unet=unet,
|
63 |
cache_dir=cache_path,
|
64 |
torch_dtype=torch.float16,
|
65 |
variant="fp16",
|
|
|
68 |
else:
|
69 |
pipe = AutoPipelineForImage2Image.from_pretrained(
|
70 |
model_id,
|
71 |
+
unet=unet,
|
72 |
cache_dir=cache_path,
|
73 |
safety_checker=None
|
74 |
)
|
75 |
|
76 |
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
|
77 |
|
78 |
+
pipe.load_lora_weights(lora_id)
|
79 |
pipe.fuse_lora()
|
80 |
|
81 |
device = "mps" if is_mac else "cuda"
|
requirements.txt
CHANGED
@@ -3,4 +3,5 @@ flask
|
|
3 |
torch
|
4 |
transformers
|
5 |
accelerate
|
|
|
6 |
pillow
|
|
|
3 |
torch
|
4 |
transformers
|
5 |
accelerate
|
6 |
+
peft
|
7 |
pillow
|