Spaces:
Runtime error
Runtime error
AdamOswald1
committed on
Commit
•
cf62a19
1
Parent(s):
60b0d5d
Update app.py
Browse files
app.py
CHANGED
@@ -76,7 +76,7 @@ current_model_path = current_model.path
|
|
76 |
if is_colab:
|
77 |
pipe = StableDiffusionPipeline.from_pretrained(
|
78 |
current_model.path,
|
79 |
-
torch_dtype=torch.
|
80 |
scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
|
81 |
safety_checker=lambda images, clip_input: (images, False)
|
82 |
)
|
@@ -84,13 +84,20 @@ if is_colab:
|
|
84 |
else:
|
85 |
pipe = StableDiffusionPipeline.from_pretrained(
|
86 |
current_model.path,
|
87 |
-
torch_dtype=torch.
|
88 |
scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
|
89 |
)
|
90 |
|
91 |
-
|
92 |
-
|
93 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
94 |
|
95 |
device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
|
96 |
|
@@ -165,22 +172,29 @@ def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width,
|
|
165 |
if is_colab or current_model == custom_model:
|
166 |
pipe = StableDiffusionPipeline.from_pretrained(
|
167 |
current_model_path,
|
168 |
-
torch_dtype=torch.
|
169 |
scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
|
170 |
safety_checker=lambda images, clip_input: (images, False)
|
171 |
)
|
172 |
else:
|
173 |
pipe = StableDiffusionPipeline.from_pretrained(
|
174 |
current_model_path,
|
175 |
-
torch_dtype=torch.
|
176 |
scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
|
177 |
)
|
178 |
# pipe = pipe.to("cpu")
|
179 |
# pipe = current_model.pipe_t2i
|
180 |
|
|
|
|
|
|
|
|
|
181 |
if torch.cuda.is_available():
|
182 |
pipe = pipe.to("cuda")
|
183 |
pipe.enable_xformers_memory_efficient_attention()
|
|
|
|
|
|
|
184 |
last_mode = "txt2img"
|
185 |
|
186 |
prompt = current_model.prefix + prompt
|
@@ -214,22 +228,29 @@ def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance
|
|
214 |
if is_colab or current_model == custom_model:
|
215 |
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
|
216 |
current_model_path,
|
217 |
-
torch_dtype=torch.
|
218 |
scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
|
219 |
safety_checker=lambda images, clip_input: (images, False)
|
220 |
)
|
221 |
else:
|
222 |
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
|
223 |
current_model_path,
|
224 |
-
torch_dtype=torch.
|
225 |
scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
|
226 |
)
|
227 |
# pipe = pipe.to("cpu")
|
228 |
# pipe = current_model.pipe_i2i
|
229 |
|
|
|
|
|
|
|
|
|
230 |
if torch.cuda.is_available():
|
231 |
pipe = pipe.to("cuda")
|
232 |
pipe.enable_xformers_memory_efficient_attention()
|
|
|
|
|
|
|
233 |
last_mode = "img2img"
|
234 |
|
235 |
prompt = current_model.prefix + prompt
|
|
|
76 |
if is_colab:
|
77 |
pipe = StableDiffusionPipeline.from_pretrained(
|
78 |
current_model.path,
|
79 |
+
torch_dtype=torch.get_default_dtype(),
|
80 |
scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
|
81 |
safety_checker=lambda images, clip_input: (images, False)
|
82 |
)
|
|
|
84 |
else:
|
85 |
pipe = StableDiffusionPipeline.from_pretrained(
|
86 |
current_model.path,
|
87 |
+
torch_dtype=torch.get_default_dtype(),
|
88 |
scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
|
89 |
)
|
90 |
|
91 |
+
to_cuda(torch, pipe)
|
92 |
+
|
93 |
+
def to_cuda(torch, pipe):
|
94 |
+
try:
|
95 |
+
if torch.cuda.is_available():
|
96 |
+
pipe = pipe.to("cuda")
|
97 |
+
pipe.enable_xformers_memory_efficient_attention()
|
98 |
+
return True
|
99 |
+
except:
|
100 |
+
return False
|
101 |
|
102 |
device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
|
103 |
|
|
|
172 |
if is_colab or current_model == custom_model:
|
173 |
pipe = StableDiffusionPipeline.from_pretrained(
|
174 |
current_model_path,
|
175 |
+
torch_dtype=torch.get_default_dtype(),
|
176 |
scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
|
177 |
safety_checker=lambda images, clip_input: (images, False)
|
178 |
)
|
179 |
else:
|
180 |
pipe = StableDiffusionPipeline.from_pretrained(
|
181 |
current_model_path,
|
182 |
+
torch_dtype=torch.get_default_dtype(),
|
183 |
scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
|
184 |
)
|
185 |
# pipe = pipe.to("cpu")
|
186 |
# pipe = current_model.pipe_t2i
|
187 |
|
188 |
+
to_cuda(torch, pipe)
|
189 |
+
|
190 |
+
def to_cuda(torch, pipe):
|
191 |
+
try:
|
192 |
if torch.cuda.is_available():
|
193 |
pipe = pipe.to("cuda")
|
194 |
pipe.enable_xformers_memory_efficient_attention()
|
195 |
+
return True
|
196 |
+
except:
|
197 |
+
return False
|
198 |
last_mode = "txt2img"
|
199 |
|
200 |
prompt = current_model.prefix + prompt
|
|
|
228 |
if is_colab or current_model == custom_model:
|
229 |
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
|
230 |
current_model_path,
|
231 |
+
torch_dtype=torch.get_default_dtype(),
|
232 |
scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
|
233 |
safety_checker=lambda images, clip_input: (images, False)
|
234 |
)
|
235 |
else:
|
236 |
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
|
237 |
current_model_path,
|
238 |
+
torch_dtype=torch.get_default_dtype(),
|
239 |
scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
|
240 |
)
|
241 |
# pipe = pipe.to("cpu")
|
242 |
# pipe = current_model.pipe_i2i
|
243 |
|
244 |
+
to_cuda(torch, pipe)
|
245 |
+
|
246 |
+
def to_cuda(torch, pipe):
|
247 |
+
try:
|
248 |
if torch.cuda.is_available():
|
249 |
pipe = pipe.to("cuda")
|
250 |
pipe.enable_xformers_memory_efficient_attention()
|
251 |
+
return True
|
252 |
+
except:
|
253 |
+
return False
|
254 |
last_mode = "img2img"
|
255 |
|
256 |
prompt = current_model.prefix + prompt
|