JunhaoZhuang committed
Commit: a5beba4
1 parent: ffbcd5c
Files changed (1): app.py (+7 -1)
app.py CHANGED
@@ -1,3 +1,4 @@
+import spaces
 import contextlib
 import gc
 import json
@@ -147,6 +148,7 @@ examples = [
 global pipeline
 global MultiResNetModel
 
+@spaces.GPU
 def load_ckpt(input_style):
     global pipeline
     global MultiResNetModel
@@ -236,7 +238,7 @@ load_ckpt(cur_input_style)
 cur_input_style = "GrayImage(ScreenStyle)"
 load_ckpt(cur_input_style)
 
-
+@spaces.GPU
 def fix_random_seeds(seed):
     random.seed(seed)
     np.random.seed(seed)
@@ -252,6 +254,7 @@ def process_multi_images(files):
         imgs.append(img)
     return imgs
 
+@spaces.GPU
 def extract_lines(image):
     src = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
 
@@ -276,6 +279,7 @@ def extract_lines(image):
     torch.cuda.empty_cache()
     return outimg
 
+@spaces.GPU
 def to_screen_image(input_image):
     global opt
     global ScreenModel
@@ -291,6 +295,7 @@ def to_screen_image(input_image):
     torch.cuda.empty_cache()
     return image_pil
 
+@spaces.GPU
 def extract_line_image(query_image_, input_style, resolution):
     if resolution == "640x640":
         tar_width = 640
@@ -317,6 +322,7 @@ def extract_line_image(query_image_, input_style, resolution):
     torch.cuda.empty_cache()
     return input_context, extracted_line, input_context
 
+@spaces.GPU(duration=180)
 def colorize_image(VAE_input, input_context, reference_images, resolution, seed, input_style, num_inference_steps):
     if VAE_input is None or input_context is None:
         gr.Info("Please preprocess the image first")
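For context, `spaces.GPU` is the Hugging Face ZeroGPU decorator: the Space process starts on CPU, and a GPU is attached only while a decorated function runs, with `duration=` requesting a longer window for slow calls such as diffusion inference. A minimal sketch of the pattern this commit applies (the `pipe` module and `infer*` names below are placeholders, not taken from app.py; the default window size is what the ZeroGPU docs describe, not something visible in this diff):

    import spaces
    import torch

    # Loaded at startup on CPU; ZeroGPU attaches a GPU only inside decorated calls.
    pipe = torch.nn.Identity()

    @spaces.GPU  # default time window per call
    def infer(x: torch.Tensor) -> torch.Tensor:
        # Move model and inputs to the GPU that ZeroGPU just attached,
        # then bring the result back to CPU before the GPU is released.
        return pipe.to("cuda")(x.to("cuda")).cpu()

    @spaces.GPU(duration=180)  # request up to 180 s, matching colorize_image above
    def infer_long(x: torch.Tensor) -> torch.Tensor:
        return pipe.to("cuda")(x.to("cuda")).cpu()

This explains the shape of the diff: every function that touches CUDA (`load_ckpt`, `extract_lines`, `to_screen_image`, `extract_line_image`, `colorize_image`) gains the decorator, and only the long-running `colorize_image` asks for an extended 180 s window.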