YulianSa committed on
Commit
5e6f2af
·
1 Parent(s): 26e52a5
Files changed (2) hide show
  1. infer_api.py +3 -4
  2. infer_multiview.py +1 -0
infer_api.py CHANGED
@@ -277,6 +277,9 @@ def run_multiview_infer(data, pipeline, cfg: TestConfig, num_levels=3):
277
  generator = None
278
  else:
279
  generator = torch.Generator(device=pipeline.unet.device).manual_seed(cfg.seed)
 
 
 
280
 
281
  images_cond = []
282
  results = {}
@@ -337,14 +340,11 @@ def run_multiview_infer(data, pipeline, cfg: TestConfig, num_levels=3):
337
  return results
338
 
339
 
340
- @spaces.GPU
341
  def load_multiview_pipeline(cfg):
342
  pipeline = StableUnCLIPImg2ImgPipeline.from_pretrained(
343
  cfg.pretrained_path,
344
  torch_dtype=torch.float16,)
345
  pipeline.unet.enable_xformers_memory_efficient_attention()
346
- if torch.cuda.is_available():
347
- pipeline.to(device)
348
  return pipeline
349
 
350
 
@@ -761,7 +761,6 @@ class InferSlrmAPI:
761
 
762
  return mesh_fpath
763
 
764
- @spaces.GPU
765
  class InferMultiviewAPI:
766
  def __init__(self, config):
767
  parser = argparse.ArgumentParser()
 
277
  generator = None
278
  else:
279
  generator = torch.Generator(device=pipeline.unet.device).manual_seed(cfg.seed)
280
+
281
+ if torch.cuda.is_available():
282
+ pipeline.to(device)
283
 
284
  images_cond = []
285
  results = {}
 
340
  return results
341
 
342
 
 
343
  def load_multiview_pipeline(cfg):
344
  pipeline = StableUnCLIPImg2ImgPipeline.from_pretrained(
345
  cfg.pretrained_path,
346
  torch_dtype=torch.float16,)
347
  pipeline.unet.enable_xformers_memory_efficient_attention()
 
 
348
  return pipeline
349
 
350
 
 
761
 
762
  return mesh_fpath
763
 
 
764
  class InferMultiviewAPI:
765
  def __init__(self, config):
766
  parser = argparse.ArgumentParser()
infer_multiview.py CHANGED
@@ -227,6 +227,7 @@ def load_multiview_pipeline(cfg):
227
  cfg.pretrained_path,
228
  torch_dtype=torch.float16,)
229
  pipeline.unet.enable_xformers_memory_efficient_attention()
 
230
  if torch.cuda.is_available():
231
  pipeline.to(device)
232
  return pipeline
 
227
  cfg.pretrained_path,
228
  torch_dtype=torch.float16,)
229
  pipeline.unet.enable_xformers_memory_efficient_attention()
230
+ import pdb; pdb.set_trace()
231
  if torch.cuda.is_available():
232
  pipeline.to(device)
233
  return pipeline