cocktailpeanut committed
Commit 1aec1ce
1 Parent(s): ce91763
Files changed (3)
  1. app.py +13 -3
  2. src/utils/frame_interpolation.py +2 -1
  3. src/utils/util.py +2 -1
app.py CHANGED
@@ -32,7 +32,8 @@ from src.audio2vid import get_headpose_temp, smooth_pose_seq
 from src.utils.frame_interpolation import init_frame_interpolation_model, batch_images_interpolation_tool
 
 if torch.backends.mps.is_available():
-    device = "mps"
+    #device = "mps"
+    device = "cpu"
 elif torch.cuda.is_available():
     device = "cuda"
 else:
@@ -44,6 +45,9 @@ if config.weight_dtype == "fp16":
     weight_dtype = torch.float16
 else:
     weight_dtype = torch.float32
+
+if device == "cpu":
+    weight_dtype = torch.float32
 
 audio_infer_config = OmegaConf.load(config.audio_inference_config)
 # prepare model
@@ -378,7 +382,10 @@ with gr.Blocks() as demo:
             a2v_headpose_video = gr.Video(label="Option: upload head pose reference video", sources="upload")
 
             with gr.Row():
-                a2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=512, label="Video size (-W & -H)")
+                if device == "cpu":
+                    a2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=256, label="Video size (-W & -H)")
+                else:
+                    a2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=512, label="Video size (-W & -H)")
                 a2v_step_slider = gr.Slider(minimum=5, maximum=50, step=1, value=20, label="Steps (--steps)")
 
             with gr.Row():
@@ -406,7 +413,10 @@
             v2v_source_video = gr.Video(label="Upload source video", sources="upload")
 
             with gr.Row():
-                v2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=512, label="Video size (-W & -H)")
+                if device == "cpu":
+                    v2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=256, label="Video size (-W & -H)")
+                else:
+                    v2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=512, label="Video size (-W & -H)")
                 v2v_step_slider = gr.Slider(minimum=5, maximum=50, step=1, value=20, label="Steps (--steps)")
 
             with gr.Row():
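Net effect of the app.py changes: Apple Silicon machines now run on CPU (the MPS branch is kept but commented out), the weight dtype is forced to fp32 whenever the device is CPU, and the default video size drops from 512 to 256 on CPU. A minimal sketch of the combined logic follows; configured_dtype stands in for app.py's config.weight_dtype, and default_size is an illustrative refactor of the duplicated gr.Slider call, not part of the commit:

import torch
import gradio as gr

# Device selection after this commit: MPS hardware falls back to CPU.
if torch.backends.mps.is_available():
    device = "cpu"   # was "mps"; disabled by this commit
elif torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

# Stand-in for config.weight_dtype, which app.py reads from its YAML config.
configured_dtype = "fp16"

# Honor the configured dtype, but force fp32 on CPU, where many PyTorch
# operators have no half-precision kernels.
weight_dtype = torch.float16 if configured_dtype == "fp16" else torch.float32
if device == "cpu":
    weight_dtype = torch.float32

# Computing the slider default once is equivalent to the if/else in the
# diff above (default_size is hypothetical, not in the commit).
default_size = 256 if device == "cpu" else 512
with gr.Blocks() as demo:
    a2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8,
                                value=default_size,
                                label="Video size (-W & -H)")

Keeping the CPU override separate from the config check means a user-configured fp16 is quietly downgraded on CPU rather than failing mid-inference.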
src/utils/frame_interpolation.py CHANGED
@@ -6,7 +6,8 @@ import bisect
 import shutil
 
 if torch.backends.mps.is_available():
-    device = "mps"
+    #device = "mps"
+    device = "cpu"
 elif torch.cuda.is_available():
     device = "cuda"
 else:
src/utils/util.py CHANGED
@@ -13,7 +13,8 @@ from einops import rearrange
 from PIL import Image
 
 if torch.backends.mps.is_available():
-    device = "mps"
+    device = "cpu"
+    #device = "mps"
 elif torch.cuda.is_available():
     device = "cuda"
 else:
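The same if/elif device block is now duplicated in app.py, src/utils/frame_interpolation.py, and src/utils/util.py, so re-enabling MPS later means editing three files. A possible follow-up, sketched under the assumption of a new (hypothetical) module src/utils/device.py, would hoist the selection into one helper:

import torch

def get_device() -> str:
    """Select the inference device; MPS is deliberately skipped for now."""
    if torch.backends.mps.is_available():
        return "cpu"  # "mps" disabled by this commit
    if torch.cuda.is_available():
        return "cuda"
    return "cpu"

device = get_device()

Each of the three modules could then import get_device() instead of repeating the chain, and a future commit could restore MPS in one place.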