Rooni committed on
Commit
5703022
·
1 Parent(s): 8e12618

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -72,7 +72,7 @@ else:
72
  )
73
 
74
  if torch.cuda.is_available():
75
- pipe = pipe.to("cuda")
76
  pipe.enable_xformers_memory_efficient_attention()
77
 
78
  device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
@@ -123,7 +123,7 @@ def inference(model_name, prompt, guidance, steps, n_images=1, width=512, height
123
  if seed == 0:
124
  seed = random.randint(0, 2147483647)
125
 
126
- generator = torch.Generator('cuda').manual_seed(seed)
127
 
128
  try:
129
  if img is not None:
@@ -162,7 +162,7 @@ def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width,
162
  # pipe = current_model.pipe_t2i
163
 
164
  if torch.cuda.is_available():
165
- pipe = pipe.to("cuda")
166
  pipe.enable_xformers_memory_efficient_attention()
167
  last_mode = "txt2img"
168
 
@@ -211,7 +211,7 @@ def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance
211
  # pipe = current_model.pipe_i2i
212
 
213
  if torch.cuda.is_available():
214
- pipe = pipe.to("cuda")
215
  pipe.enable_xformers_memory_efficient_attention()
216
  last_mode = "img2img"
217
 
 
72
  )
73
 
74
  if torch.cuda.is_available():
75
+ pipe = pipe.to("cpu")
76
  pipe.enable_xformers_memory_efficient_attention()
77
 
78
  device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
 
123
  if seed == 0:
124
  seed = random.randint(0, 2147483647)
125
 
126
+ generator = torch.Generator('cpu').manual_seed(seed)
127
 
128
  try:
129
  if img is not None:
 
162
  # pipe = current_model.pipe_t2i
163
 
164
  if torch.cuda.is_available():
165
+ pipe = pipe.to("cpu")
166
  pipe.enable_xformers_memory_efficient_attention()
167
  last_mode = "txt2img"
168
 
 
211
  # pipe = current_model.pipe_i2i
212
 
213
  if torch.cuda.is_available():
214
+ pipe = pipe.to("cpu")
215
  pipe.enable_xformers_memory_efficient_attention()
216
  last_mode = "img2img"
217