Update app.py
app.py CHANGED
@@ -67,7 +67,7 @@ else: # download all models
     vae = AutoencoderKL.from_pretrained(current_model.path, subfolder="vae", torch_dtype=torch.float16)
     for model in models:
         try:
-            print(f"{datetime.datetime.now()} Downloading {model.name}...")
+            print(f"{datetime.datetime.now()} Downloading {model.name} model...")
             unet = UNet2DConditionModel.from_pretrained(model.path, subfolder="unet", torch_dtype=torch.float16)
             model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
             model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
@@ -107,7 +107,9 @@ def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0
     else:
         return txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator)
 
-def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator
+def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator):
+
+    print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}")
 
     global last_mode
     global pipe
@@ -138,7 +140,9 @@ def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, g
 
     return replace_nsfw_images(result)
 
-def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator
+def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
+
+    print(f"{datetime.datetime.now()} img_to_img, model: {model_path}")
 
     global last_mode
     global pipe
@@ -253,7 +257,7 @@ with gr.Blocks(css=css) as demo:
     generate.click(inference, inputs=inputs, outputs=image_out)
 
     ex = gr.Examples([
-        [models[
+        [models[7].name, "tiny cute and adorable kitten adventurer dressed in a warm overcoat with survival gear on a winters day", 7.5, 50],
         [models[4].name, "portrait of dwayne johnson", 7.0, 75],
         [models[5].name, "portrait of a beautiful alyx vance half life", 10, 50],
         [models[6].name, "Aloy from Horizon: Zero Dawn, half body portrait, smooth, detailed armor, beautiful face, illustration", 7.0, 45],
@@ -271,8 +275,9 @@ with gr.Blocks(css=css) as demo:
     </div>
     """)
 
+print(f"Space built in {time.time() - start_time:.2f} seconds")
+
 if not is_colab:
     demo.queue(concurrency_count=1)
 demo.launch(debug=is_colab, share=is_colab)
 
-print(f"Space built in {time.time() - start_time:.2f} seconds")
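The pattern this commit relies on is plain timestamped print() logging around the slow steps (per-model downloads, the txt_to_img / img_to_img dispatch) plus a startup timer that is now reported before the demo is queued and launched. A minimal sketch of that pattern, assuming only the standard datetime and time modules; the log() helper below is hypothetical and not part of app.py, which inlines the print() calls instead:

    import datetime
    import time

    start_time = time.time()

    def log(msg):
        # Hypothetical helper: same timestamped-print pattern the commit adds inline.
        print(f"{datetime.datetime.now()} {msg}")

    log("Downloading <model name> model...")  # e.g. before each from_pretrained call
    # ... load the UNet/VAE and build the Stable Diffusion pipelines here ...
    log(f"Space built in {time.time() - start_time:.2f} seconds")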