edit import
- Demo_TFR_Pirenderer/src/audio2pose_models/audio2pose.py +3 -3
- Demo_TFR_Pirenderer/src/audio2pose_models/cvae.py +1 -1
- Demo_TFR_Pirenderer/src/audio2pose_models/res_unet.py +1 -1
- Demo_TFR_Pirenderer/src/face3d/models/__init__.py +1 -1
- Demo_TFR_Pirenderer/src/face3d/models/bfm.py +1 -1
- Demo_TFR_Pirenderer/src/face3d/models/facerecon_model.py +7 -7
- Demo_TFR_Pirenderer/src/face3d/util/__init__.py +1 -1
- Demo_TFR_Pirenderer/src/face3d/visualize.py +2 -2
- Demo_TFR_Pirenderer/src/pirenderer/animate.py +2 -2
- Demo_TFR_Pirenderer/src/pirenderer/modules/face_model.py +2 -2
- Demo_TFR_Pirenderer/src/test_audio2coeff.py +3 -3
- Demo_TFR_Pirenderer/src/utils/audio.py +1 -1
- Demo_TFR_Pirenderer/src/utils/face_enhancer.py +1 -1
- Demo_TFR_Pirenderer/src/utils/paste_pic.py +1 -1
- Demo_TFR_Pirenderer/src/utils/preprocess.py +6 -6
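
Every hunk below applies the same one-line change: imports rooted at src.* are rewritten to Demo_TFR_Pirenderer.src.*, so the modules resolve when the Space's entry point runs from the repository root rather than from inside Demo_TFR_Pirenderer/. A minimal sketch for sanity-checking the rewritten names from the repo root (this helper is illustrative only, not part of the commit):

import importlib.util

def can_import(name: str) -> bool:
    # True if `name` resolves to a module on the current sys.path.
    try:
        return importlib.util.find_spec(name) is not None
    except ModuleNotFoundError:
        return False

# Module names taken from the hunks below; they resolve only when the
# directory containing Demo_TFR_Pirenderer/ is the working directory or
# is otherwise on sys.path.
for mod in ("Demo_TFR_Pirenderer.src.audio2pose_models.cvae",
            "Demo_TFR_Pirenderer.src.utils.videoio"):
    print(mod, "->", can_import(mod))
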
Demo_TFR_Pirenderer/src/audio2pose_models/audio2pose.py
CHANGED
@@ -1,8 +1,8 @@
 import torch
 from torch import nn
-from src.audio2pose_models.cvae import CVAE
-from src.audio2pose_models.discriminator import PoseSequenceDiscriminator
-from src.audio2pose_models.audio_encoder import AudioEncoder
+from Demo_TFR_Pirenderer.src.audio2pose_models.cvae import CVAE
+from Demo_TFR_Pirenderer.src.audio2pose_models.discriminator import PoseSequenceDiscriminator
+from Demo_TFR_Pirenderer.src.audio2pose_models.audio_encoder import AudioEncoder

 class Audio2Pose(nn.Module):
     def __init__(self, cfg, wav2lip_checkpoint, device='cuda'):

Demo_TFR_Pirenderer/src/audio2pose_models/cvae.py
CHANGED
@@ -1,7 +1,7 @@
 import torch
 import torch.nn.functional as F
 from torch import nn
-from src.audio2pose_models.res_unet import ResUnet
+from Demo_TFR_Pirenderer.src.audio2pose_models.res_unet import ResUnet

 def class2onehot(idx, class_num):

Demo_TFR_Pirenderer/src/audio2pose_models/res_unet.py
CHANGED
@@ -1,6 +1,6 @@
 import torch
 import torch.nn as nn
-from src.audio2pose_models.networks import ResidualConv, Upsample
+from Demo_TFR_Pirenderer.src.audio2pose_models.networks import ResidualConv, Upsample


 class ResUnet(nn.Module):

Demo_TFR_Pirenderer/src/face3d/models/__init__.py
CHANGED
@@ -19,7 +19,7 @@ See our template model class 'template_model.py' for more details.
 """

 import importlib
-from src.face3d.models.base_model import BaseModel
+from Demo_TFR_Pirenderer.src.face3d.models.base_model import BaseModel


 def find_model_using_name(model_name):

Demo_TFR_Pirenderer/src/face3d/models/bfm.py
CHANGED
@@ -5,7 +5,7 @@ import numpy as np
 import torch
 import torch.nn.functional as F
 from scipy.io import loadmat
-from src.face3d.util.load_mats import transferBFM09
+from Demo_TFR_Pirenderer.src.face3d.util.load_mats import transferBFM09
 import os

 def perspective_projection(focal, center):

Demo_TFR_Pirenderer/src/face3d/models/facerecon_model.py
CHANGED
@@ -3,13 +3,13 @@

 import numpy as np
 import torch
-from src.face3d.models.base_model import BaseModel
-from src.face3d.models import networks
-from src.face3d.models.bfm import ParametricFaceModel
-from src.face3d.models.losses import perceptual_loss, photo_loss, reg_loss, reflectance_loss, landmark_loss
-from src.face3d.util import util
-from src.face3d.util.nvdiffrast import MeshRenderer
-# from src.face3d.util.preprocess import estimate_norm_torch
+from Demo_TFR_Pirenderer.src.face3d.models.base_model import BaseModel
+from Demo_TFR_Pirenderer.src.face3d.models import networks
+from Demo_TFR_Pirenderer.src.face3d.models.bfm import ParametricFaceModel
+from Demo_TFR_Pirenderer.src.face3d.models.losses import perceptual_loss, photo_loss, reg_loss, reflectance_loss, landmark_loss
+from Demo_TFR_Pirenderer.src.face3d.util import util
+from Demo_TFR_Pirenderer.src.face3d.util.nvdiffrast import MeshRenderer
+# from Demo_TFR_Pirenderer.src.face3d.util.preprocess import estimate_norm_torch

 import trimesh
 from scipy.io import savemat

Demo_TFR_Pirenderer/src/face3d/util/__init__.py
CHANGED
@@ -1,3 +1,3 @@
 """This package includes a miscellaneous collection of useful helper functions."""
-from src.face3d.util import *
+from Demo_TFR_Pirenderer.src.face3d.util import *

Demo_TFR_Pirenderer/src/face3d/visualize.py
CHANGED
@@ -1,8 +1,8 @@
 # check the sync of 3dmm feature and the audio
 import cv2
 import numpy as np
-from src.face3d.models.bfm import ParametricFaceModel
-from src.face3d.models.facerecon_model import FaceReconModel
+from Demo_TFR_Pirenderer.src.face3d.models.bfm import ParametricFaceModel
+from Demo_TFR_Pirenderer.src.face3d.models.facerecon_model import FaceReconModel
 import torch
 import subprocess, platform
 import scipy.io as scio

Demo_TFR_Pirenderer/src/pirenderer/animate.py
CHANGED
@@ -13,8 +13,8 @@ import torch
 import torchvision

 from pydub import AudioSegment
-from src.pirenderer.modules.face_model import FaceGenerator
-from src.utils.videoio import save_video_with_watermark
+from Demo_TFR_Pirenderer.src.pirenderer.modules.face_model import FaceGenerator
+from Demo_TFR_Pirenderer.src.utils.videoio import save_video_with_watermark

 class AnimateFromCoeff():

Demo_TFR_Pirenderer/src/pirenderer/modules/face_model.py
CHANGED
@@ -7,8 +7,8 @@ import torch.nn.functional as F
 import sys
 sys.path.append("..")

-from src.pirenderer.util import flow_util
-from src.pirenderer.modules.base_function import LayerNorm2d, ADAINHourglass, FineEncoder, FineDecoder
+from Demo_TFR_Pirenderer.src.pirenderer.util import flow_util
+from Demo_TFR_Pirenderer.src.pirenderer.modules.base_function import LayerNorm2d, ADAINHourglass, FineEncoder, FineDecoder


 class FaceGenerator(nn.Module):

Demo_TFR_Pirenderer/src/test_audio2coeff.py
CHANGED
@@ -5,9 +5,9 @@ from scipy.io import savemat, loadmat
 from yacs.config import CfgNode as CN
 from scipy.signal import savgol_filter

-from src.audio2pose_models.audio2pose import Audio2Pose
-from src.audio2exp_models.networks import SimpleWrapperV2
-from src.audio2exp_models.audio2exp import Audio2Exp
+from Demo_TFR_Pirenderer.src.audio2pose_models.audio2pose import Audio2Pose
+from Demo_TFR_Pirenderer.src.audio2exp_models.networks import SimpleWrapperV2
+from Demo_TFR_Pirenderer.src.audio2exp_models.audio2exp import Audio2Exp

 def load_cpk(checkpoint_path, model=None, optimizer=None, device="cpu"):
     checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))

Demo_TFR_Pirenderer/src/utils/audio.py
CHANGED
@@ -4,7 +4,7 @@ import numpy as np
 # import tensorflow as tf
 from scipy import signal
 from scipy.io import wavfile
-from src.utils.hparams import hparams as hp
+from Demo_TFR_Pirenderer.src.utils.hparams import hparams as hp

 def load_wav(path, sr):
     return librosa.core.load(path, sr=sr)[0]

Demo_TFR_Pirenderer/src/utils/face_enhancer.py
CHANGED
@@ -5,7 +5,7 @@ from gfpgan import GFPGANer

 from tqdm import tqdm

-from src.utils.videoio import load_video_to_cv2
+from Demo_TFR_Pirenderer.src.utils.videoio import load_video_to_cv2

 import cv2

Demo_TFR_Pirenderer/src/utils/paste_pic.py
CHANGED
@@ -3,7 +3,7 @@ import numpy as np
 from tqdm import tqdm
 import uuid

-from src.utils.videoio import save_video_with_watermark
+from Demo_TFR_Pirenderer.src.utils.videoio import save_video_with_watermark

 def paste_pic(video_path, pic_path, crop_info, new_audio_path, full_video_path):

Demo_TFR_Pirenderer/src/utils/preprocess.py
CHANGED
@@ -4,19 +4,19 @@ from tqdm import tqdm
 from PIL import Image

 # 3dmm extraction
-from src.face3d.util.preprocess import align_img
-from src.face3d.util.load_mats import load_lm3d
-from src.face3d.models import networks
+from Demo_TFR_Pirenderer.src.face3d.util.preprocess import align_img
+from Demo_TFR_Pirenderer.src.face3d.util.load_mats import load_lm3d
+from Demo_TFR_Pirenderer.src.face3d.models import networks

 try:
     import webui
-    from src.face3d.extract_kp_videos_safe import KeypointExtractor
+    from Demo_TFR_Pirenderer.src.face3d.extract_kp_videos_safe import KeypointExtractor
     assert torch.cuda.is_available() == True
 except:
-    from src.face3d.extract_kp_videos import KeypointExtractor
+    from Demo_TFR_Pirenderer.src.face3d.extract_kp_videos import KeypointExtractor

 from scipy.io import loadmat, savemat
-from src.utils.croper import Croper
+from Demo_TFR_Pirenderer.src.utils.croper import Croper

 import warnings
 warnings.filterwarnings("ignore")
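
For reference, the entire commit is the same textual substitution applied across the fifteen files listed above. A hedged sketch of reproducing it mechanically (illustrative only; it assumes it is run from the repository root and rewrites exactly the import prefix shown in the hunks):

from pathlib import Path

OLD = "from src."
NEW = "from Demo_TFR_Pirenderer.src."

# Rewrite the import prefix in place across the package.
for py_file in Path("Demo_TFR_Pirenderer/src").rglob("*.py"):
    text = py_file.read_text(encoding="utf-8")
    if OLD in text:
        py_file.write_text(text.replace(OLD, NEW), encoding="utf-8")
        print("rewrote", py_file)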