Hugging Face Spaces — Running on A100

Commit: "bring compile back" — Browse files

Files changed:
- app-controlnet.py (+3 −3)
- app-txt2img.py (+3 −3)
app-controlnet.py
CHANGED
@@ -100,9 +100,9 @@ pipe.unet.to(memory_format=torch.channels_last)
     if psutil.virtual_memory().total < 64 * 1024**3:
         pipe.enable_attention_slicing()

-
-
-
+    if not mps_available and not xpu_available:
+        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+        pipe(prompt="warmup", image=[Image.new("RGB", (512, 512))])

     compel_proc = Compel(
         tokenizer=pipe.tokenizer,
app-txt2img.py
CHANGED
@@ -76,9 +76,9 @@ pipe.unet.to(memory_format=torch.channels_last)
     if psutil.virtual_memory().total < 64 * 1024**3:
         pipe.enable_attention_slicing()

-
-
-
+    if not mps_available and not xpu_available:
+        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+        pipe(prompt="warmup", num_inference_steps=1, guidance_scale=8.0)

     compel_proc = Compel(
         tokenizer=pipe.tokenizer,