czl committed on
Commit
067a15e
1 Parent(s): a6328e5

fix xformers

Browse files
Files changed (2) hide show
  1. app.py +1 -1
  2. tools/synth.py +1 -0
app.py CHANGED
@@ -11,7 +11,7 @@ from transformers import CLIPModel, CLIPProcessor
11
 
12
  from tools import synth
13
 
14
- device = "cuda" # if torch.cuda.is_available() else "cpu"
15
  model_path = "runwayml/stable-diffusion-v1-5"
16
  clip_model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14").to(device)
17
  clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
 
11
 
12
  from tools import synth
13
 
14
+ device = "cuda" if torch.cuda.is_available() else "cpu"
15
  model_path = "runwayml/stable-diffusion-v1-5"
16
  clip_model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14").to(device)
17
  clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
tools/synth.py CHANGED
@@ -169,6 +169,7 @@ def pipe_img(
169
  ) # lower is faster but lower quality
170
  helper.enable()
171
  if torch.cuda.is_available():
 
172
  pipe.enable_xformers_memory_efficient_attention()
173
  if use_torchcompile:
174
  pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
 
169
  ) # lower is faster but lower quality
170
  helper.enable()
171
  if torch.cuda.is_available():
172
+ pipe.to("cuda")
173
  pipe.enable_xformers_memory_efficient_attention()
174
  if use_torchcompile:
175
  pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)