Update app.py
app.py CHANGED
@@ -25,19 +25,8 @@ ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 
-
-pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3-medium", torch_dtype=torch.float16, use_auth_token=huggingface_token)
+pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3-medium", torch_dtype=torch.float16, use_auth_token=huggingface_token)
 
-if ENABLE_CPU_OFFLOAD:
-    pipe.enable_model_cpu_offload()
-else:
-    pipe.to(device)
-    print("Loaded on Device!")
-
-if USE_TORCH_COMPILE:
-    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
-    print("Model Compiled!")
-
 
 def save_image(img):
     unique_name = str(uuid.uuid4()) + ".png"
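For context, the removed block handled device placement, optional CPU offload, and an optional torch.compile step. One thing worth noting is that StableDiffusion3Pipeline exposes its denoiser as pipe.transformer rather than pipe.unet, so the removed torch.compile(pipe.unet, ...) line would fail if USE_TORCH_COMPILE were enabled. Below is a minimal sketch of how that setup could be kept for an SD3 pipeline; it is an assumption-laden illustration, not this Space's code, and the USE_TORCH_COMPILE and HF_TOKEN handling are modeled on, not taken from, the visible diff.

```python
# Sketch only: how the removed device/offload/compile setup could look for SD3.
# ENABLE_CPU_OFFLOAD matches the hunk header; USE_TORCH_COMPILE and the HF_TOKEN
# environment variable are assumptions, not taken from the visible diff.
import os

import torch
from diffusers import StableDiffusion3Pipeline

ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"  # assumed definition

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium",  # the diffusers-format weights may live in a "-diffusers" repo variant
    torch_dtype=torch.float16,
    token=os.getenv("HF_TOKEN"),  # SD3 is a gated model, so a token is needed
)

if ENABLE_CPU_OFFLOAD:
    # Keep submodules on CPU and stream them to the GPU on demand.
    pipe.enable_model_cpu_offload()
else:
    pipe.to(device)

if USE_TORCH_COMPILE:
    # SD3 uses an MMDiT transformer; there is no pipe.unet attribute.
    pipe.transformer = torch.compile(pipe.transformer, mode="reduce-overhead", fullgraph=True)
```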
@@ -84,7 +73,6 @@ def generate(
         num_inference_steps=num_inference_steps,
         generator=generator,
         num_images_per_prompt=NUM_IMAGES_PER_PROMPT,
-        use_resolution_binning=use_resolution_binning,
         output_type="pil",
     ).images
 
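The second hunk drops use_resolution_binning from the generation call. That keyword comes from the PixArt-style pipelines and does not appear to be accepted by StableDiffusion3Pipeline.__call__, so passing it would raise a TypeError. A hedged sketch of the resulting call, with illustrative values standing in for the Space's Gradio inputs:

```python
# Illustrative call after the change; the prompt, seed, guidance scale, and step
# count are placeholders, not this Space's actual defaults.
generator = torch.Generator(device=device).manual_seed(0)

images = pipe(
    prompt="a photo of an astronaut riding a horse on mars",
    negative_prompt="",
    guidance_scale=7.0,
    num_inference_steps=28,
    generator=generator,
    num_images_per_prompt=1,
    output_type="pil",
).images
images[0].save("astronaut.png")
```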