ameerazam08 committed
Commit ba32b3e · verified · 1 parent: 5e4d641

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50)
  1. .gitattributes +5 -0
  2. .gitignore +2 -0
  3. DEMO/00018.png +3 -0
  4. DEMO/2.mp4 +0 -0
  5. DEMO/AdamKinzinger2_3.mp4 +3 -0
  6. DEMO/BobCorker_0.mp4 +3 -0
  7. DEMO/demo_img_1.jpg +3 -0
  8. DEMO/demo_img_2.jpg +3 -0
  9. DEMO/demo_img_3.jpg +3 -0
  10. DEMO/demo_img_4.jpg +3 -0
  11. DEMO/demo_img_5.jpg +3 -0
  12. DEMO/demo_video_1.mp4 +0 -0
  13. DEMO/demo_video_2.mp4 +0 -0
  14. DEMO/kohli.mp4 +3 -0
  15. DEMO/reference_frame.png +3 -0
  16. DEMO/salma.jpg +3 -0
  17. README.md +77 -0
  18. __pycache__/animate.cpython-38.pyc +0 -0
  19. __pycache__/logger.cpython-38.pyc +0 -0
  20. animate.py +34 -0
  21. augmentation.py +345 -0
  22. config/mix-resolution.yml +89 -0
  23. crop_portrait.py +145 -0
  24. demo.py +313 -0
  25. environment.yaml +106 -0
  26. frames_dataset.py +280 -0
  27. logger.py +194 -0
  28. media/Teaser_video.png +3 -0
  29. modules/__pycache__/dense_motion.cpython-36.pyc +0 -0
  30. modules/__pycache__/dense_motion.cpython-37.pyc +0 -0
  31. modules/__pycache__/dense_motion.cpython-38.pyc +0 -0
  32. modules/__pycache__/discriminator.cpython-36.pyc +0 -0
  33. modules/__pycache__/discriminator.cpython-37.pyc +0 -0
  34. modules/__pycache__/discriminator.cpython-38.pyc +0 -0
  35. modules/__pycache__/generator.cpython-36.pyc +0 -0
  36. modules/__pycache__/generator.cpython-37.pyc +0 -0
  37. modules/__pycache__/generator.cpython-38.pyc +0 -0
  38. modules/__pycache__/hopenet.cpython-36.pyc +0 -0
  39. modules/__pycache__/hopenet.cpython-37.pyc +0 -0
  40. modules/__pycache__/hopenet.cpython-38.pyc +0 -0
  41. modules/__pycache__/keypoint_detector.cpython-36.pyc +0 -0
  42. modules/__pycache__/keypoint_detector.cpython-37.pyc +0 -0
  43. modules/__pycache__/keypoint_detector.cpython-38.pyc +0 -0
  44. modules/__pycache__/model.cpython-36.pyc +0 -0
  45. modules/__pycache__/model.cpython-37.pyc +0 -0
  46. modules/__pycache__/model.cpython-38.pyc +0 -0
  47. modules/__pycache__/util.cpython-36.pyc +0 -0
  48. modules/__pycache__/util.cpython-37.pyc +0 -0
  49. modules/__pycache__/util.cpython-38.pyc +0 -0
  50. modules/dense_motion.py +128 -0
.gitattributes CHANGED
@@ -53,3 +53,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
56
+ DEMO/AdamKinzinger2_3.mp4 filter=lfs diff=lfs merge=lfs -text
57
+ DEMO/BobCorker_0.mp4 filter=lfs diff=lfs merge=lfs -text
58
+ DEMO/kohli.mp4 filter=lfs diff=lfs merge=lfs -text
59
+ upsampler/data/390.mp4 filter=lfs diff=lfs merge=lfs -text
60
+ upsampler/data/684.mp4 filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
1
+ checkpoints/mix-train.pth.tar
2
+ results_hq.mp4
DEMO/00018.png ADDED

Git LFS Details

  • SHA256: 54d7de250441760a2e2fad71310ef574cd45f35f967a28e7bb2eadf11422198f
  • Pointer size: 130 Bytes
  • Size of remote file: 92.5 kB
DEMO/2.mp4 ADDED
Binary file (479 kB). View file
 
DEMO/AdamKinzinger2_3.mp4 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36d57156a133df0384ac8fb0e68ec26f46f4f11e35dde6f51feee1b549fd2430
3
+ size 2357112
DEMO/BobCorker_0.mp4 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba7cda063d4b56b57da8af68412a705f27da2a66d81e572edb673400cbb6a4fd
3
+ size 7693563
DEMO/demo_img_1.jpg ADDED

Git LFS Details

  • SHA256: d5c36ba4bb4c01dab4deb4c14ac98f274d14998fb8400967f85a51515df36e6f
  • Pointer size: 130 Bytes
  • Size of remote file: 82.2 kB
DEMO/demo_img_2.jpg ADDED

Git LFS Details

  • SHA256: 7cab91f5f0a87b7a8f0733ef6c5e2c51593f2c0a9eed94698e9f630fdd7049c1
  • Pointer size: 130 Bytes
  • Size of remote file: 48 kB
DEMO/demo_img_3.jpg ADDED

Git LFS Details

  • SHA256: 36c936803d7821608f83c947a28370db2347504d0e7a63b79ff47f44444d33d8
  • Pointer size: 130 Bytes
  • Size of remote file: 57.2 kB
DEMO/demo_img_4.jpg ADDED

Git LFS Details

  • SHA256: 7d7eb9e63f2e02f4a71ae183788f16cd2686b95804199a1c5b59b1dc2a52ee55
  • Pointer size: 130 Bytes
  • Size of remote file: 57.7 kB
DEMO/demo_img_5.jpg ADDED

Git LFS Details

  • SHA256: 842d362062f17325f3e5079b30235beb6855ffff293bfed70faf7f9a40480b8c
  • Pointer size: 132 Bytes
  • Size of remote file: 1.21 MB
DEMO/demo_video_1.mp4 ADDED
Binary file (579 kB). View file
 
DEMO/demo_video_2.mp4 ADDED
Binary file (799 kB). View file
 
DEMO/kohli.mp4 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d53528539cdc2332468ebd10234d7e6831be176fe7e93505f6beec2420e4c22e
3
+ size 1678730
DEMO/reference_frame.png ADDED

Git LFS Details

  • SHA256: 8a8709fe3de45055ca8770a9284eecd12328e6cd7a399d0c22a8ace8f1594696
  • Pointer size: 131 Bytes
  • Size of remote file: 175 kB
DEMO/salma.jpg ADDED

Git LFS Details

  • SHA256: 985a12f77ec465a48620c8b2f86e6ca2c5fe01929ea3e10b6c80e9e555c277b0
  • Pointer size: 130 Bytes
  • Size of remote file: 32.2 kB
README.md ADDED
@@ -0,0 +1,77 @@
1
+ # Adaptive Super Resolution For One-Shot Talking-Head Generation
2
+ The repository for the ICASSP 2024 paper "Adaptive Super Resolution for One-Shot Talking-Head Generation" (AdaSR-TalkingHead).
3
+
4
+ ## Abstract
5
+ One-shot talking-head generation synthesizes a talking-head video from a single source portrait image, driven by a video of the same or a different identity. These methods usually rely on plane-based pixel transformations via Jacobian matrices, or on facial image warps, to generate novel poses. The constraints of a single source image and of pixel displacements often compromise the clarity of the synthesized frames. Some methods introduce additional super-resolution modules to improve quality, but this increases computational cost and disturbs the original data distribution. In this work, we propose an adaptive high-quality talking-head video generation method that synthesizes high-resolution video without additional pre-trained modules. Specifically, inspired by existing super-resolution methods, we down-sample the one-shot source image and then adaptively reconstruct high-frequency details via an encoder-decoder module, resulting in sharper video. Our method consistently improves the quality of generated videos through a straightforward yet effective strategy, substantiated by quantitative and qualitative evaluations. The code and demo video are available at: https://github.com/Songluchuan/AdaSR-TalkingHead/
6
+
7
+ ## Updates
8
+
9
+ - [03/2024] Inference code and pretrained model are released.
10
+ - [03/2024] arXiv link: https://arxiv.org/abs/2403.15944.
11
+ - [COMING] Super-resolution model (based on StyleGANEX and ESRGAN).
12
+ - [COMING] Train code and processed datasets.
13
+
14
+
15
+ ## Installation
16
+
17
+ **Clone this repo:**
18
+ ```bash
19
+ git clone git@github.com:Songluchuan/AdaSR-TalkingHead.git
20
+ cd AdaSR-TalkingHead
21
+ ```
22
+ **Dependencies:**
23
+
24
+ We have tested on:
25
+ - CUDA 11.3-11.6
26
+ - PyTorch 1.10.1
27
+ - Matplotlib 3.4.3; Matplotlib 3.4.2; opencv-python 4.7.0; scikit-learn 1.0; tqdm 4.62.3
28
+
29
+ ## Inference Code
30
+
31
+
32
+ 1. Download the pretrained model from Google Drive: https://drive.google.com/file/d/1g58uuAyZFdny9_twvbv0AHxB9-03koko/view?usp=sharing (trained on the HDTF dataset) and put it under ```checkpoints/```<br>
33
+
34
+
35
+ 2. The demo videos and reference images are under ```DEMO/```
36
+
37
+
38
+ 3. The inference entry point is ```run_demo.sh```; run it with
39
+
40
+ ```
41
+ bash run_demo.sh
42
+ ```
43
+
44
+ 4. You can set a different source image and driving video in ```run_demo.sh``` (a fuller command sketch follows this file):
45
+ ```
46
+ --source_image DEMO/demo_img_3.jpg
47
+ ```
48
+ and
49
+ ```
50
+ --driving_video DEMO/demo_video_1.mp4
51
+ ```
52
+
53
+
54
+ ## Video
55
+ <div align="center">
56
+ <a href="https://www.youtube.com/watch?v=B_-3F51QmKE" target="_blank">
57
+ <img src="media/Teaser_video.png" alt="AdaSR Talking-Head" width="1120" style="height: auto;" />
58
+ </a>
59
+ </div>
60
+
61
+
62
+
63
+ ## Citation
64
+
65
+ ```bibtex
66
+ @inproceedings{song2024adaptive,
67
+ title={Adaptive Super Resolution for One-Shot Talking Head Generation},
68
+ author={Song, Luchuan and Liu, Pinxin and Yin, Guojun and Xu, Chenliang},
69
+ year={2024},
70
+ organization={IEEE International Conference on Acoustics, Speech, and Signal Processing}
71
+ }
72
+ ```
73
+
74
+ ## Acknowledgments
75
+
76
+ The code is mainly developed based on [styleGANEX](https://github.com/williamyang1991/StyleGANEX), [ESRGAN](https://github.com/xinntao/ESRGAN) and [unofficial face2vid](https://github.com/zhanglonghao1992/One-Shot_Free-View_Neural_Talking_Head_Synthesis). Thanks to the authors for their contributions.
77
+
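Since `run_demo.sh` itself is not among the files shown in this truncated view, here is a minimal sketch of an equivalent direct call to `demo.py`. It is assembled only from the argparse flags defined in `demo.py` below and from paths that appear elsewhere in this commit (`config/mix-resolution.yml`, plus `checkpoints/mix-train.pth.tar` and `results_hq.mp4` from `.gitignore`); the real `run_demo.sh` may differ.

```bash
# Hedged sketch, not the actual run_demo.sh: every flag exists in demo.py's argparse,
# but the config/checkpoint file names are inferred from other files in this commit.
python demo.py \
    --config config/mix-resolution.yml \
    --checkpoint checkpoints/mix-train.pth.tar \
    --source_image DEMO/demo_img_3.jpg \
    --driving_video DEMO/demo_video_1.mp4 \
    --result_video results_hq.mp4 \
    --gen spade --relative --adapt_scale --find_best_frame
```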
__pycache__/animate.cpython-38.pyc ADDED
Binary file (1.22 kB). View file
 
__pycache__/logger.cpython-38.pyc ADDED
Binary file (7.28 kB). View file
 
animate.py ADDED
@@ -0,0 +1,34 @@
1
+ import os
2
+ from tqdm import tqdm
3
+
4
+ import torch
5
+ from torch.utils.data import DataLoader
6
+
7
+ from logger import Logger, Visualizer
8
+ import imageio
9
+ from scipy.spatial import ConvexHull
10
+ import numpy as np
11
+
12
+ from sync_batchnorm import DataParallelWithCallback
13
+
14
+ def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False,
15
+ use_relative_movement=False, use_relative_jacobian=False):
16
+ if adapt_movement_scale:
17
+ source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume
18
+ driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume
19
+ adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area)
20
+ else:
21
+ adapt_movement_scale = 1
22
+
23
+ kp_new = {k: v for k, v in kp_driving.items()}
24
+
25
+ if use_relative_movement:
26
+ kp_value_diff = (kp_driving['value'] - kp_driving_initial['value'])
27
+ kp_value_diff *= adapt_movement_scale
28
+ kp_new['value'] = kp_value_diff + kp_source['value']
29
+
30
+ if use_relative_jacobian:
31
+ jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian']))
32
+ kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian'])
33
+
34
+ return kp_new
augmentation.py ADDED
@@ -0,0 +1,345 @@
1
+ """
2
+ Code from https://github.com/hassony2/torch_videovision
3
+ """
4
+
5
+ import numbers
6
+
7
+ import random
8
+ import numpy as np
9
+ import PIL
10
+
11
+ from skimage.transform import resize, rotate
12
+ from skimage.util import pad
13
+ import torchvision
14
+
15
+ import warnings
16
+
17
+ from skimage import img_as_ubyte, img_as_float
18
+
19
+
20
+ def crop_clip(clip, min_h, min_w, h, w):
21
+ if isinstance(clip[0], np.ndarray):
22
+ cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip]
23
+
24
+ elif isinstance(clip[0], PIL.Image.Image):
25
+ cropped = [
26
+ img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip
27
+ ]
28
+ else:
29
+ raise TypeError('Expected numpy.ndarray or PIL.Image' +
30
+ 'but got list of {0}'.format(type(clip[0])))
31
+ return cropped
32
+
33
+
34
+ def pad_clip(clip, h, w):
35
+ im_h, im_w = clip[0].shape[:2]
36
+ pad_h = (0, 0) if h < im_h else ((h - im_h) // 2, (h - im_h + 1) // 2)
37
+ pad_w = (0, 0) if w < im_w else ((w - im_w) // 2, (w - im_w + 1) // 2)
38
+
39
+ return pad(clip, ((0, 0), pad_h, pad_w, (0, 0)), mode='edge')
40
+
41
+
42
+ def resize_clip(clip, size, interpolation='bilinear'):
43
+ if isinstance(clip[0], np.ndarray):
44
+ if isinstance(size, numbers.Number):
45
+ im_h, im_w, im_c = clip[0].shape
46
+ # Min spatial dim already matches minimal size
47
+ if (im_w <= im_h and im_w == size) or (im_h <= im_w
48
+ and im_h == size):
49
+ return clip
50
+ new_h, new_w = get_resize_sizes(im_h, im_w, size)
51
+ size = (new_w, new_h)
52
+ else:
53
+ size = size[1], size[0]
54
+
55
+ scaled = [
56
+ resize(img, size, order=1 if interpolation == 'bilinear' else 0, preserve_range=True,
57
+ mode='constant', anti_aliasing=True) for img in clip
58
+ ]
59
+ elif isinstance(clip[0], PIL.Image.Image):
60
+ if isinstance(size, numbers.Number):
61
+ im_w, im_h = clip[0].size
62
+ # Min spatial dim already matches minimal size
63
+ if (im_w <= im_h and im_w == size) or (im_h <= im_w
64
+ and im_h == size):
65
+ return clip
66
+ new_h, new_w = get_resize_sizes(im_h, im_w, size)
67
+ size = (new_w, new_h)
68
+ else:
69
+ size = size[1], size[0]
70
+ if interpolation == 'bilinear':
71
+ pil_inter = PIL.Image.NEAREST
72
+ else:
73
+ pil_inter = PIL.Image.BILINEAR
74
+ scaled = [img.resize(size, pil_inter) for img in clip]
75
+ else:
76
+ raise TypeError('Expected numpy.ndarray or PIL.Image' +
77
+ 'but got list of {0}'.format(type(clip[0])))
78
+ return scaled
79
+
80
+
81
+ def get_resize_sizes(im_h, im_w, size):
82
+ if im_w < im_h:
83
+ ow = size
84
+ oh = int(size * im_h / im_w)
85
+ else:
86
+ oh = size
87
+ ow = int(size * im_w / im_h)
88
+ return oh, ow
89
+
90
+
91
+ class RandomFlip(object):
92
+ def __init__(self, time_flip=False, horizontal_flip=False):
93
+ self.time_flip = time_flip
94
+ self.horizontal_flip = horizontal_flip
95
+
96
+ def __call__(self, clip):
97
+ if random.random() < 0.5 and self.time_flip:
98
+ return clip[::-1]
99
+ if random.random() < 0.5 and self.horizontal_flip:
100
+ return [np.fliplr(img) for img in clip]
101
+
102
+ return clip
103
+
104
+
105
+ class RandomResize(object):
106
+ """Resizes a list of (H x W x C) numpy.ndarray to the final size
107
+ The larger the original image is, the more times it takes to
108
+ interpolate
109
+ Args:
110
+ interpolation (str): Can be one of 'nearest', 'bilinear'
111
+ defaults to nearest
112
+ size (tuple): (width, height)
113
+ """
114
+
115
+ def __init__(self, ratio=(3. / 4., 4. / 3.), interpolation='nearest'):
116
+ self.ratio = ratio
117
+ self.interpolation = interpolation
118
+
119
+ def __call__(self, clip):
120
+ scaling_factor = random.uniform(self.ratio[0], self.ratio[1])
121
+
122
+ if isinstance(clip[0], np.ndarray):
123
+ im_h, im_w, im_c = clip[0].shape
124
+ elif isinstance(clip[0], PIL.Image.Image):
125
+ im_w, im_h = clip[0].size
126
+
127
+ new_w = int(im_w * scaling_factor)
128
+ new_h = int(im_h * scaling_factor)
129
+ new_size = (new_w, new_h)
130
+ resized = resize_clip(
131
+ clip, new_size, interpolation=self.interpolation)
132
+
133
+ return resized
134
+
135
+
136
+ class RandomCrop(object):
137
+ """Extract random crop at the same location for a list of videos
138
+ Args:
139
+ size (sequence or int): Desired output size for the
140
+ crop in format (h, w)
141
+ """
142
+
143
+ def __init__(self, size):
144
+ if isinstance(size, numbers.Number):
145
+ size = (size, size)
146
+
147
+ self.size = size
148
+
149
+ def __call__(self, clip):
150
+ """
151
+ Args:
152
+ img (PIL.Image or numpy.ndarray): List of videos to be cropped
153
+ in format (h, w, c) in numpy.ndarray
154
+ Returns:
155
+ PIL.Image or numpy.ndarray: Cropped list of videos
156
+ """
157
+ h, w = self.size
158
+ if isinstance(clip[0], np.ndarray):
159
+ im_h, im_w, im_c = clip[0].shape
160
+ elif isinstance(clip[0], PIL.Image.Image):
161
+ im_w, im_h = clip[0].size
162
+ else:
163
+ raise TypeError('Expected numpy.ndarray or PIL.Image' +
164
+ 'but got list of {0}'.format(type(clip[0])))
165
+
166
+ clip = pad_clip(clip, h, w)
167
+ im_h, im_w = clip.shape[1:3]
168
+ x1 = 0 if h == im_h else random.randint(0, im_w - w)
169
+ y1 = 0 if w == im_w else random.randint(0, im_h - h)
170
+ cropped = crop_clip(clip, y1, x1, h, w)
171
+
172
+ return cropped
173
+
174
+
175
+ class RandomRotation(object):
176
+ """Rotate entire clip randomly by a random angle within
177
+ given bounds
178
+ Args:
179
+ degrees (sequence or int): Range of degrees to select from
180
+ If degrees is a number instead of sequence like (min, max),
181
+ the range of degrees, will be (-degrees, +degrees).
182
+ """
183
+
184
+ def __init__(self, degrees):
185
+ if isinstance(degrees, numbers.Number):
186
+ if degrees < 0:
187
+ raise ValueError('If degrees is a single number,'
188
+ 'must be positive')
189
+ degrees = (-degrees, degrees)
190
+ else:
191
+ if len(degrees) != 2:
192
+ raise ValueError('If degrees is a sequence,'
193
+ 'it must be of len 2.')
194
+
195
+ self.degrees = degrees
196
+
197
+ def __call__(self, clip):
198
+ """
199
+ Args:
200
+ img (PIL.Image or numpy.ndarray): List of videos to be cropped
201
+ in format (h, w, c) in numpy.ndarray
202
+ Returns:
203
+ PIL.Image or numpy.ndarray: Cropped list of videos
204
+ """
205
+ angle = random.uniform(self.degrees[0], self.degrees[1])
206
+ if isinstance(clip[0], np.ndarray):
207
+ rotated = [rotate(image=img, angle=angle, preserve_range=True) for img in clip]
208
+ elif isinstance(clip[0], PIL.Image.Image):
209
+ rotated = [img.rotate(angle) for img in clip]
210
+ else:
211
+ raise TypeError('Expected numpy.ndarray or PIL.Image' +
212
+ 'but got list of {0}'.format(type(clip[0])))
213
+
214
+ return rotated
215
+
216
+
217
+ class ColorJitter(object):
218
+ """Randomly change the brightness, contrast and saturation and hue of the clip
219
+ Args:
220
+ brightness (float): How much to jitter brightness. brightness_factor
221
+ is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
222
+ contrast (float): How much to jitter contrast. contrast_factor
223
+ is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
224
+ saturation (float): How much to jitter saturation. saturation_factor
225
+ is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
226
+ hue(float): How much to jitter hue. hue_factor is chosen uniformly from
227
+ [-hue, hue]. Should be >=0 and <= 0.5.
228
+ """
229
+
230
+ def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
231
+ self.brightness = brightness
232
+ self.contrast = contrast
233
+ self.saturation = saturation
234
+ self.hue = hue
235
+
236
+ def get_params(self, brightness, contrast, saturation, hue):
237
+ if brightness > 0:
238
+ brightness_factor = random.uniform(
239
+ max(0, 1 - brightness), 1 + brightness)
240
+ else:
241
+ brightness_factor = None
242
+
243
+ if contrast > 0:
244
+ contrast_factor = random.uniform(
245
+ max(0, 1 - contrast), 1 + contrast)
246
+ else:
247
+ contrast_factor = None
248
+
249
+ if saturation > 0:
250
+ saturation_factor = random.uniform(
251
+ max(0, 1 - saturation), 1 + saturation)
252
+ else:
253
+ saturation_factor = None
254
+
255
+ if hue > 0:
256
+ hue_factor = random.uniform(-hue, hue)
257
+ else:
258
+ hue_factor = None
259
+ return brightness_factor, contrast_factor, saturation_factor, hue_factor
260
+
261
+ def __call__(self, clip):
262
+ """
263
+ Args:
264
+ clip (list): list of PIL.Image
265
+ Returns:
266
+ list PIL.Image : list of transformed PIL.Image
267
+ """
268
+ if isinstance(clip[0], np.ndarray):
269
+ brightness, contrast, saturation, hue = self.get_params(
270
+ self.brightness, self.contrast, self.saturation, self.hue)
271
+
272
+ # Create img transform function sequence
273
+ img_transforms = []
274
+ if brightness is not None:
275
+ img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness))
276
+ if saturation is not None:
277
+ img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation))
278
+ if hue is not None:
279
+ img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue))
280
+ if contrast is not None:
281
+ img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast))
282
+ random.shuffle(img_transforms)
283
+ img_transforms = [img_as_ubyte, torchvision.transforms.ToPILImage()] + img_transforms + [np.array,
284
+ img_as_float]
285
+
286
+ with warnings.catch_warnings():
287
+ warnings.simplefilter("ignore")
288
+ jittered_clip = []
289
+ for img in clip:
290
+ jittered_img = img
291
+ for func in img_transforms:
292
+ jittered_img = func(jittered_img)
293
+ jittered_clip.append(jittered_img.astype('float32'))
294
+ elif isinstance(clip[0], PIL.Image.Image):
295
+ brightness, contrast, saturation, hue = self.get_params(
296
+ self.brightness, self.contrast, self.saturation, self.hue)
297
+
298
+ # Create img transform function sequence
299
+ img_transforms = []
300
+ if brightness is not None:
301
+ img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness))
302
+ if saturation is not None:
303
+ img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation))
304
+ if hue is not None:
305
+ img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue))
306
+ if contrast is not None:
307
+ img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast))
308
+ random.shuffle(img_transforms)
309
+
310
+ # Apply to all videos
311
+ jittered_clip = []
312
+ for img in clip:
313
+ for func in img_transforms:
314
+ jittered_img = func(img)
315
+ jittered_clip.append(jittered_img)
316
+
317
+ else:
318
+ raise TypeError('Expected numpy.ndarray or PIL.Image' +
319
+ 'but got list of {0}'.format(type(clip[0])))
320
+ return jittered_clip
321
+
322
+
323
+ class AllAugmentationTransform:
324
+ def __init__(self, resize_param=None, rotation_param=None, flip_param=None, crop_param=None, jitter_param=None):
325
+ self.transforms = []
326
+
327
+ if flip_param is not None:
328
+ self.transforms.append(RandomFlip(**flip_param))
329
+
330
+ if rotation_param is not None:
331
+ self.transforms.append(RandomRotation(**rotation_param))
332
+
333
+ if resize_param is not None:
334
+ self.transforms.append(RandomResize(**resize_param))
335
+
336
+ if crop_param is not None:
337
+ self.transforms.append(RandomCrop(**crop_param))
338
+
339
+ if jitter_param is not None:
340
+ self.transforms.append(ColorJitter(**jitter_param))
341
+
342
+ def __call__(self, clip):
343
+ for t in self.transforms:
344
+ clip = t(clip)
345
+ return clip
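For orientation, the sketch below shows how the `augmentation_params` block of `config/mix-resolution.yml` drives `AllAugmentationTransform`; `frames_dataset.py` in this commit does the same via `AllAugmentationTransform(**augmentation_params)`. The frame size and clip length here are arbitrary placeholders, not values used by the project.

```python
# Illustrative only: wire the config's augmentation_params into AllAugmentationTransform,
# mirroring frames_dataset.py. Frame size and clip length are made-up placeholders.
import numpy as np
from augmentation import AllAugmentationTransform

aug = AllAugmentationTransform(
    flip_param={"horizontal_flip": True, "time_flip": True},
    jitter_param={"brightness": 0.1, "contrast": 0.1, "saturation": 0.1, "hue": 0.1},
)

clip = [np.random.rand(256, 256, 3).astype("float32") for _ in range(2)]  # two fake frames
augmented = aug(clip)  # list of float32 frames, randomly flipped and color-jittered
```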
config/mix-resolution.yml ADDED
@@ -0,0 +1,89 @@
1
+ dataset_params:
2
+ root_dir: ../../../train/cropped_clips_512_vid/
3
+ frame_shape: [512, 512, 3]
4
+ id_sampling: True
5
+ pairs_list: None
6
+ augmentation_params:
7
+ flip_param:
8
+ horizontal_flip: True
9
+ time_flip: True
10
+ jitter_param:
11
+ brightness: 0.1
12
+ contrast: 0.1
13
+ saturation: 0.1
14
+ hue: 0.1
15
+
16
+
17
+ model_params:
18
+ common_params:
19
+ num_kp: 15
20
+ image_channel: 3
21
+ feature_channel: 32
22
+ estimate_jacobian: False
23
+ kp_detector_params:
24
+ temperature: 0.1
25
+ block_expansion: 32
26
+ max_features: 1024
27
+ scale_factor: 0.25
28
+ num_blocks: 5
29
+ reshape_channel: 16384 # 16384 = 1024 * 16
30
+ reshape_depth: 16
31
+ he_estimator_params:
32
+ block_expansion: 64
33
+ max_features: 2048
34
+ num_bins: 66
35
+ generator_params:
36
+ block_expansion: 64
37
+ max_features: 512
38
+ num_down_blocks: 2
39
+ reshape_channel: 32
40
+ reshape_depth: 16 # 512 = 32 * 16
41
+ num_resblocks: 6
42
+ estimate_occlusion_map: True
43
+ dense_motion_params:
44
+ block_expansion: 32
45
+ max_features: 1024
46
+ num_blocks: 5
47
+ # reshape_channel: 32
48
+ reshape_depth: 16
49
+ compress: 4
50
+ discriminator_params:
51
+ scales: [1]
52
+ block_expansion: 32
53
+ max_features: 512
54
+ num_blocks: 4
55
+ sn: True
56
+
57
+ train_params:
58
+ num_epochs: 200
59
+ num_repeats: 5
60
+ num_worker: 8
61
+ epoch_milestones: [16,]
62
+ lr_generator: 2.0e-4
63
+ lr_discriminator: 2.0e-4
64
+ lr_kp_detector: 2.0e-4
65
+ lr_he_estimator: 2.0e-4
66
+ gan_mode: 'hinge' # hinge or ls
67
+ batch_size: 4
68
+ scales: [1, 0.5, 0.25, 0.125]
69
+ checkpoint_freq: 1
70
+ hopenet_snapshot: './checkpoints/hopenet_robust_alpha1.pkl'
71
+ transform_params:
72
+ sigma_affine: 0.05
73
+ sigma_tps: 0.005
74
+ points_tps: 5
75
+ loss_weights:
76
+ generator_gan: 1
77
+ discriminator_gan: 1
78
+ feature_matching: [10, 10, 10, 10]
79
+ perceptual: [10, 10, 10, 10, 10]
80
+ equivariance_value: 10
81
+ equivariance_jacobian: 0
82
+ keypoint: 10
83
+ headpose: 20
84
+ expression: 5
85
+
86
+ visualizer_params:
87
+ kp_size: 5
88
+ draw_border: True
89
+ colormap: 'gist_rainbow'
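As a quick orientation to how this file is consumed: `demo.py` below simply opens the YAML and indexes into it (it calls `yaml.load(f)`). The sketch below does the same with `safe_load` to avoid the missing-`Loader` warning on newer PyYAML; the printed values come straight from the config above.

```python
# Sketch: load config/mix-resolution.yml the way demo.py does and read a few fields.
import yaml

with open("config/mix-resolution.yml") as f:
    config = yaml.safe_load(f)

common = config["model_params"]["common_params"]
print(common["num_kp"], common["estimate_jacobian"])  # 15 False
print(config["train_params"]["batch_size"])           # 4
```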
crop_portrait.py ADDED
@@ -0,0 +1,145 @@
1
+ # """
2
+ # Crop the upper body in every video frame; the square bounding box is averaged among all frames and fixed.
3
+ # """
4
+
5
+ # import os
6
+ # import cv2
7
+ # import argparse
8
+ # from tqdm import tqdm
9
+ # import face_recognition
10
+ # import torch
11
+ # import util
12
+ # import numpy as np
13
+ # import face_detection
14
+
15
+ # def crop_per_image(data_dir, dest_size, crop_level):
16
+ # fa = face_detection.FaceAlignment(face_detection.LandmarksType._2D, flip_input=False, device='cuda')
17
+
18
+ # image_list = util.get_file_list(os.path.join(data_dir, 'full'))
19
+ # batch_size = 5
20
+ # frames = []
21
+
22
+ # for i in tqdm(range(len(image_list))):
23
+ # frame = face_recognition.load_image_file(image_list[i])
24
+ # frames.append(frame)
25
+
26
+ # H, W, _ = frames[0].shape
27
+
28
+ # batches = [frames[i:i + batch_size] for i in range(0, len(frames), batch_size)]
29
+
30
+ # for idx in tqdm(range(len(batches))):
31
+ # fb = batches[idx]
32
+ # preds = fa.get_detections_for_batch(np.asarray(fb))
33
+
34
+ # for j, f in enumerate(preds):
35
+ # if f is None:
36
+ # print('no face in image {}'.format(idx * batch_size + j))
37
+ # else:
38
+ # left, top, right, bottom = f
39
+
40
+
41
+ # height = bottom - top
42
+ # width = right - left
43
+ # crop_size = int(height * crop_level)
44
+
45
+ # horizontal_delta = (crop_size - width) // 2
46
+ # vertical_delta = (crop_size - height) // 2
47
+
48
+ # left = max(left - horizontal_delta, 0)
49
+ # right = min(right + horizontal_delta, W)
50
+ # top = max(top - int(vertical_delta * 0.5), 0)
51
+ # bottom = min(bottom + int(vertical_delta * 1.5), H)
52
+
53
+ # crop_f = cv2.imread(image_list[idx * batch_size + j])
54
+ # crop_f = crop_f[top:bottom, left:right]
55
+ # crop_f = cv2.resize(crop_f, (dest_size, dest_size), interpolation=cv2.INTER_AREA)
56
+ # cv2.imwrite(os.path.join(data_dir, 'crop', os.path.basename(image_list[idx * batch_size + j])), crop_f)
57
+
58
+
59
+ # if __name__ == '__main__':
60
+ # parser = argparse.ArgumentParser(description='Process some integers.')
61
+ # parser.add_argument('--data_dir', type=str, default=None)
62
+ # parser.add_argument('--dest_size', type=int, default=256)
63
+ # parser.add_argument('--crop_level', type=float, default=1.0, help='Adjust crop image size.')
64
+ # parser.add_argument('--vertical_adjust', type=float, default=0.3, help='Adjust vertical location of portrait in image.')
65
+ # args = parser.parse_args()
66
+ # util.create_dir(os.path.join(args.data_dir,'crop'))
67
+ # util.create_dir(os.path.join(args.data_dir, 'crop_region'))
68
+ # crop_per_image(args.data_dir, dest_size=args.dest_size, crop_level=args.crop_level)
69
+
70
+
71
+ import os
72
+ import cv2
73
+ import argparse
74
+ from tqdm import tqdm
75
+ import face_recognition
76
+ import numpy as np
77
+ import face_detection
78
+ import util
79
+
80
+ def crop_per_frame_and_make_video(data_dir, dest_size, crop_level, video_out_path, fps=30):
81
+ # Initialize face alignment
82
+ fa = face_detection.FaceAlignment(face_detection.LandmarksType._2D, flip_input=False, device='cuda')
83
+
84
+ # Get list of images (frames)
85
+ image_list = util.get_file_list(os.path.join(data_dir, 'full'))
86
+ batch_size = 5
87
+ frames = []
88
+
89
+ # Load frames
90
+ for image_path in tqdm(image_list, desc='Loading images'):
91
+ frame = cv2.imread(image_path)
92
+ frames.append(frame)
93
+
94
+ H, W, _ = frames[0].shape
95
+ batches = [frames[i:i + batch_size] for i in range(0, len(frames), batch_size)]
96
+ cropped_frames = []
97
+
98
+ for idx, fb in enumerate(tqdm(batches, desc='Processing batches')):
99
+ preds = fa.get_detections_for_batch(np.asarray(fb))
100
+
101
+ for j, f in enumerate(preds):
102
+ if f is None:
103
+ print(f'No face in image {idx * batch_size + j}')
104
+ continue # Skip frames with no detected face
105
+
106
+ left, top, right, bottom = f
107
+ height = bottom - top
108
+ width = right - left
109
+ crop_size = int(height * crop_level)
110
+
111
+ horizontal_delta = (crop_size - width) // 2
112
+ vertical_delta = (crop_size - height) // 2
113
+
114
+ left = max(left - horizontal_delta, 0)
115
+ right = min(right + horizontal_delta, W)
116
+ top = max(top - int(vertical_delta * 0.5), 0)
117
+ bottom = min(bottom + int(vertical_delta * 1.5), H)
118
+
119
+ crop_f = fb[j][top:bottom, left:right]
120
+ crop_f = cv2.resize(crop_f, (dest_size, dest_size), interpolation=cv2.INTER_AREA)
121
+ cropped_frames.append(crop_f)
122
+
123
+ # Define the codec and create VideoWriter object
124
+ fourcc = cv2.VideoWriter_fourcc(*'mp4v')
125
+ out = cv2.VideoWriter(video_out_path, fourcc, fps, (dest_size, dest_size))
126
+
127
+ # Write frames to video
128
+ for frame in tqdm(cropped_frames, desc='Compiling video'):
129
+ out.write(frame)
130
+
131
+ # Release everything when job is finished
132
+ out.release()
133
+ cv2.destroyAllWindows()
134
+
135
+ if __name__ == '__main__':
136
+ parser = argparse.ArgumentParser(description='Crop video frames and compile into a video.')
137
+ parser.add_argument('--data_dir', type=str, required=True, help='Directory with video frames to process.')
138
+ parser.add_argument('--dest_size', type=int, default=256, help='Destination size of cropped images.')
139
+ parser.add_argument('--crop_level', type=float, default=1.0, help='Adjust crop size relative to face detection.')
140
+ parser.add_argument('--video_out_path', type=str, required=True, help='Output path for the resulting video.')
141
+ parser.add_argument('--fps', type=int, default=30, help='Frames per second for the output video.')
142
+ args = parser.parse_args()
143
+
144
+ util.create_dir(os.path.join(args.data_dir, 'crop'))
145
+ crop_per_frame_and_make_video(args.data_dir, dest_size=args.dest_size, crop_level=args.crop_level, video_out_path=args.video_out_path, fps=args.fps)
demo.py ADDED
@@ -0,0 +1,313 @@
1
+ # python demo.py --config config/vox-256-spade.yml --checkpoint checkpoints/00000189-checkpoint.pth.tar --source_image /home/cxu-serve/p61/rzhu14/lsong11_workspace/Thin-Plate-Spline-Motion-Model/assets/test.png --driving_video /home/cxu-serve/p61/rzhu14/lsong11_workspace/Thin-Plate-Spline-Motion-Model/assets/driving.mp4 --relative --adapt_scale --find_best_frame --gen spade
2
+ import matplotlib
3
+ matplotlib.use('Agg')
4
+ import os, sys
5
+ import yaml
6
+ from argparse import ArgumentParser
7
+ from tqdm import tqdm
8
+
9
+ import imageio
10
+ import numpy as np
11
+ from skimage.transform import resize
12
+ from skimage import img_as_ubyte
13
+ import torch
14
+ import torch.nn.functional as F
15
+ from sync_batchnorm import DataParallelWithCallback
16
+
17
+ from modules.generator import OcclusionAwareGenerator, OcclusionAwareSPADEGenerator
18
+ from modules.keypoint_detector import KPDetector, HEEstimator
19
+ from animate import normalize_kp
20
+ from scipy.spatial import ConvexHull
21
+ import warnings
22
+ warnings.filterwarnings("ignore")
23
+
24
+
25
+ if sys.version_info[0] < 3:
26
+ raise Exception("You must use Python 3 or higher. Recommended version is Python 3.7")
27
+
28
+ def load_checkpoints(config_path, checkpoint_path, gen, cpu=False):
29
+
30
+ with open(config_path) as f:
31
+ config = yaml.load(f)
32
+
33
+ if gen == 'original':
34
+ generator = OcclusionAwareGenerator(**config['model_params']['generator_params'],
35
+ **config['model_params']['common_params'])
36
+ elif gen == 'spade':
37
+ generator = OcclusionAwareSPADEGenerator(**config['model_params']['generator_params'],
38
+ **config['model_params']['common_params'])
39
+
40
+ if not cpu:
41
+ generator.cuda()
42
+
43
+ kp_detector = KPDetector(**config['model_params']['kp_detector_params'],
44
+ **config['model_params']['common_params'])
45
+ if not cpu:
46
+ kp_detector.cuda()
47
+
48
+ he_estimator = HEEstimator(**config['model_params']['he_estimator_params'],
49
+ **config['model_params']['common_params'])
50
+ if not cpu:
51
+ he_estimator.cuda()
52
+
53
+ if cpu:
54
+ checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
55
+ else:
56
+ checkpoint = torch.load(checkpoint_path)
57
+
58
+ generator.load_state_dict(checkpoint['generator'])
59
+ kp_detector.load_state_dict(checkpoint['kp_detector'])
60
+ he_estimator.load_state_dict(checkpoint['he_estimator'])
61
+
62
+ if not cpu:
63
+ generator = DataParallelWithCallback(generator)
64
+ kp_detector = DataParallelWithCallback(kp_detector)
65
+ he_estimator = DataParallelWithCallback(he_estimator)
66
+
67
+ generator.eval()
68
+ kp_detector.eval()
69
+ he_estimator.eval()
70
+
71
+ return generator, kp_detector, he_estimator
72
+
73
+
74
+ def headpose_pred_to_degree(pred):
75
+ device = pred.device
76
+ idx_tensor = [idx for idx in range(66)]
77
+ idx_tensor = torch.FloatTensor(idx_tensor).to(device)
78
+ pred = F.softmax(pred)
79
+ degree = torch.sum(pred*idx_tensor, axis=1) * 3 - 99
80
+
81
+ return degree
82
+
83
+ '''
84
+ # beta version
85
+ def get_rotation_matrix(yaw, pitch, roll):
86
+ yaw = yaw / 180 * 3.14
87
+ pitch = pitch / 180 * 3.14
88
+ roll = roll / 180 * 3.14
89
+
90
+ roll = roll.unsqueeze(1)
91
+ pitch = pitch.unsqueeze(1)
92
+ yaw = yaw.unsqueeze(1)
93
+
94
+ roll_mat = torch.cat([torch.ones_like(roll), torch.zeros_like(roll), torch.zeros_like(roll),
95
+ torch.zeros_like(roll), torch.cos(roll), -torch.sin(roll),
96
+ torch.zeros_like(roll), torch.sin(roll), torch.cos(roll)], dim=1)
97
+ roll_mat = roll_mat.view(roll_mat.shape[0], 3, 3)
98
+
99
+ pitch_mat = torch.cat([torch.cos(pitch), torch.zeros_like(pitch), torch.sin(pitch),
100
+ torch.zeros_like(pitch), torch.ones_like(pitch), torch.zeros_like(pitch),
101
+ -torch.sin(pitch), torch.zeros_like(pitch), torch.cos(pitch)], dim=1)
102
+ pitch_mat = pitch_mat.view(pitch_mat.shape[0], 3, 3)
103
+
104
+ yaw_mat = torch.cat([torch.cos(yaw), -torch.sin(yaw), torch.zeros_like(yaw),
105
+ torch.sin(yaw), torch.cos(yaw), torch.zeros_like(yaw),
106
+ torch.zeros_like(yaw), torch.zeros_like(yaw), torch.ones_like(yaw)], dim=1)
107
+ yaw_mat = yaw_mat.view(yaw_mat.shape[0], 3, 3)
108
+
109
+ rot_mat = torch.einsum('bij,bjk,bkm->bim', roll_mat, pitch_mat, yaw_mat)
110
+
111
+ return rot_mat
112
+
113
+ '''
114
+ def get_rotation_matrix(yaw, pitch, roll):
115
+ yaw = yaw / 180 * 3.14
116
+ pitch = pitch / 180 * 3.14
117
+ roll = roll / 180 * 3.14
118
+
119
+ roll = roll.unsqueeze(1)
120
+ pitch = pitch.unsqueeze(1)
121
+ yaw = yaw.unsqueeze(1)
122
+
123
+ pitch_mat = torch.cat([torch.ones_like(pitch), torch.zeros_like(pitch), torch.zeros_like(pitch),
124
+ torch.zeros_like(pitch), torch.cos(pitch), -torch.sin(pitch),
125
+ torch.zeros_like(pitch), torch.sin(pitch), torch.cos(pitch)], dim=1)
126
+ pitch_mat = pitch_mat.view(pitch_mat.shape[0], 3, 3)
127
+
128
+ yaw_mat = torch.cat([torch.cos(yaw), torch.zeros_like(yaw), torch.sin(yaw),
129
+ torch.zeros_like(yaw), torch.ones_like(yaw), torch.zeros_like(yaw),
130
+ -torch.sin(yaw), torch.zeros_like(yaw), torch.cos(yaw)], dim=1)
131
+ yaw_mat = yaw_mat.view(yaw_mat.shape[0], 3, 3)
132
+
133
+ roll_mat = torch.cat([torch.cos(roll), -torch.sin(roll), torch.zeros_like(roll),
134
+ torch.sin(roll), torch.cos(roll), torch.zeros_like(roll),
135
+ torch.zeros_like(roll), torch.zeros_like(roll), torch.ones_like(roll)], dim=1)
136
+ roll_mat = roll_mat.view(roll_mat.shape[0], 3, 3)
137
+
138
+ rot_mat = torch.einsum('bij,bjk,bkm->bim', pitch_mat, yaw_mat, roll_mat)
139
+
140
+ return rot_mat
141
+
142
+ def keypoint_transformation(kp_canonical, he, estimate_jacobian=True, free_view=False, yaw=0, pitch=0, roll=0):
143
+ kp = kp_canonical['value']
144
+ if not free_view:
145
+ yaw, pitch, roll = he['yaw'], he['pitch'], he['roll']
146
+ yaw = headpose_pred_to_degree(yaw)
147
+ pitch = headpose_pred_to_degree(pitch)
148
+ roll = headpose_pred_to_degree(roll)
149
+ else:
150
+ if yaw is not None:
151
+ yaw = torch.tensor([yaw]).cuda()
152
+ else:
153
+ yaw = he['yaw']
154
+ yaw = headpose_pred_to_degree(yaw)
155
+ if pitch is not None:
156
+ pitch = torch.tensor([pitch]).cuda()
157
+ else:
158
+ pitch = he['pitch']
159
+ pitch = headpose_pred_to_degree(pitch)
160
+ if roll is not None:
161
+ roll = torch.tensor([roll]).cuda()
162
+ else:
163
+ roll = he['roll']
164
+ roll = headpose_pred_to_degree(roll)
165
+
166
+ t, exp = he['t'], he['exp']
167
+
168
+ rot_mat = get_rotation_matrix(yaw, pitch, roll)
169
+
170
+ # keypoint rotation
171
+ kp_rotated = torch.einsum('bmp,bkp->bkm', rot_mat, kp)
172
+
173
+ # keypoint translation
174
+ t = t.unsqueeze_(1).repeat(1, kp.shape[1], 1)
175
+ kp_t = kp_rotated + t
176
+
177
+ # add expression deviation
178
+ exp = exp.view(exp.shape[0], -1, 3)
179
+ kp_transformed = kp_t + exp
180
+
181
+ if estimate_jacobian:
182
+ jacobian = kp_canonical['jacobian']
183
+ jacobian_transformed = torch.einsum('bmp,bkps->bkms', rot_mat, jacobian)
184
+ else:
185
+ jacobian_transformed = None
186
+
187
+ return {'value': kp_transformed, 'jacobian': jacobian_transformed}
188
+
189
+ def make_animation(source_image, driving_video, generator, kp_detector, he_estimator, relative=True, adapt_movement_scale=True, estimate_jacobian=True, cpu=False, free_view=False, yaw=0, pitch=0, roll=0):
190
+ with torch.no_grad():
191
+ predictions = []
192
+ source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
193
+ if not cpu:
194
+ source = source.cuda()
195
+ driving = torch.tensor(np.array(driving_video)[np.newaxis].astype(np.float32)).permute(0, 4, 1, 2, 3)
196
+ kp_canonical = kp_detector(source)
197
+ he_source = he_estimator(source)
198
+ he_driving_initial = he_estimator(driving[:, :, 0])
199
+
200
+ kp_source = keypoint_transformation(kp_canonical, he_source, estimate_jacobian)
201
+ kp_driving_initial = keypoint_transformation(kp_canonical, he_driving_initial, estimate_jacobian)
202
+ # kp_driving_initial = keypoint_transformation(kp_canonical, he_driving_initial, free_view=free_view, yaw=yaw, pitch=pitch, roll=roll)
203
+
204
+ for frame_idx in tqdm(range(driving.shape[2])):
205
+ driving_frame = driving[:, :, frame_idx]
206
+ if not cpu:
207
+ driving_frame = driving_frame.cuda()
208
+ he_driving = he_estimator(driving_frame)
209
+ kp_driving = keypoint_transformation(kp_canonical, he_driving, estimate_jacobian, free_view=free_view, yaw=yaw, pitch=pitch, roll=roll)
210
+
211
+ # np.save('all_kps/%05d.npy'%frame_idx, kp_driving['value'].cpu().detach().numpy())
212
+ # import pdb; pdb.set_trace()
213
+ kp_norm = normalize_kp(kp_source=kp_source, kp_driving=kp_driving,
214
+ kp_driving_initial=kp_driving_initial, use_relative_movement=relative,
215
+ use_relative_jacobian=estimate_jacobian, adapt_movement_scale=adapt_movement_scale)
216
+ out = generator(source, frame_idx, kp_source=kp_source, kp_driving=kp_norm)
217
+
218
+ predictions.append(np.transpose(out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0])
219
+ return predictions
220
+
221
+ def find_best_frame(source, driving, cpu=False):
222
+ import face_alignment
223
+
224
+ def normalize_kp(kp):
225
+ kp = kp - kp.mean(axis=0, keepdims=True)
226
+ area = ConvexHull(kp[:, :2]).volume
227
+ area = np.sqrt(area)
228
+ kp[:, :2] = kp[:, :2] / area
229
+ return kp
230
+
231
+ # fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=True,
232
+ # device='cpu' if cpu else 'cuda')
233
+
234
+ fa = face_alignment.FaceAlignment(face_alignment.LandmarksType.TWO_D, flip_input=True,
235
+ device='cpu' if cpu else 'cuda')
236
+ kp_source = fa.get_landmarks(255 * source)[0]
237
+ kp_source = normalize_kp(kp_source)
238
+ norm = float('inf')
239
+ frame_num = 0
240
+ for i, image in tqdm(enumerate(driving)):
241
+ kp_driving = fa.get_landmarks(255 * image)[0]
242
+ kp_driving = normalize_kp(kp_driving)
243
+ new_norm = (np.abs(kp_source - kp_driving) ** 2).sum()
244
+ if new_norm < norm:
245
+ norm = new_norm
246
+ frame_num = i
247
+ return frame_num
248
+
249
+ if __name__ == "__main__":
250
+ parser = ArgumentParser()
251
+ parser.add_argument("--config", default='config/vox-256.yaml', help="path to config")
252
+ parser.add_argument("--checkpoint", default='', help="path to checkpoint to restore")
253
+
254
+ parser.add_argument("--source_image", default='', help="path to source image")
255
+ parser.add_argument("--driving_video", default='', help="path to driving video")
256
+ parser.add_argument("--result_video", default='./results_hq.mp4', help="path to output")
257
+
258
+ parser.add_argument("--gen", default="spade", choices=["original", "spade"])
259
+
260
+ parser.add_argument("--relative", dest="relative", action="store_true", help="use relative or absolute keypoint coordinates")
261
+ parser.add_argument("--adapt_scale", dest="adapt_scale", action="store_true", help="adapt movement scale based on convex hull of keypoints")
262
+
263
+ parser.add_argument("--find_best_frame", dest="find_best_frame", action="store_true",
264
+ help="Generate from the frame that is the most alligned with source. (Only for faces, requires face_aligment lib)")
265
+
266
+ parser.add_argument("--best_frame", dest="best_frame", type=int, default=None,
267
+ help="Set frame to start from.")
268
+
269
+ parser.add_argument("--cpu", dest="cpu", action="store_true", help="cpu mode.")
270
+
271
+ parser.add_argument("--free_view", dest="free_view", action="store_true", help="control head pose")
272
+ parser.add_argument("--yaw", dest="yaw", type=int, default=None, help="yaw")
273
+ parser.add_argument("--pitch", dest="pitch", type=int, default=None, help="pitch")
274
+ parser.add_argument("--roll", dest="roll", type=int, default=None, help="roll")
275
+
276
+
277
+ parser.set_defaults(relative=False)
278
+ parser.set_defaults(adapt_scale=False)
279
+ parser.set_defaults(free_view=False)
280
+
281
+ opt = parser.parse_args()
282
+
283
+ source_image = imageio.imread(opt.source_image)
284
+ reader = imageio.get_reader(opt.driving_video)
285
+ fps = reader.get_meta_data()['fps']
286
+ driving_video = []
287
+ try:
288
+ for im in reader:
289
+ driving_video.append(im)
290
+ except RuntimeError:
291
+ pass
292
+ reader.close()
293
+
294
+ source_image = resize(source_image, (512, 512))[..., :3]
295
+ driving_video = [resize(frame, (512, 512))[..., :3] for frame in driving_video]
296
+ generator, kp_detector, he_estimator = load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, gen=opt.gen, cpu=opt.cpu)
297
+
298
+ with open(opt.config) as f:
299
+ config = yaml.load(f)
300
+ estimate_jacobian = config['model_params']['common_params']['estimate_jacobian']
301
+ print(f'estimate jacobian: {estimate_jacobian}')
302
+
303
+ if opt.find_best_frame or opt.best_frame is not None:
304
+ i = opt.best_frame if opt.best_frame is not None else find_best_frame(source_image, driving_video, cpu=opt.cpu)
305
+ print ("Best frame: " + str(i))
306
+ driving_forward = driving_video[i:]
307
+ driving_backward = driving_video[:(i+1)][::-1]
308
+ predictions_forward = make_animation(source_image, driving_forward, generator, kp_detector, he_estimator, relative=opt.relative, adapt_movement_scale=opt.adapt_scale, estimate_jacobian=estimate_jacobian, cpu=opt.cpu, free_view=opt.free_view, yaw=opt.yaw, pitch=opt.pitch, roll=opt.roll)
309
+ predictions_backward = make_animation(source_image, driving_backward, generator, kp_detector, he_estimator, relative=opt.relative, adapt_movement_scale=opt.adapt_scale, estimate_jacobian=estimate_jacobian, cpu=opt.cpu, free_view=opt.free_view, yaw=opt.yaw, pitch=opt.pitch, roll=opt.roll)
310
+ predictions = predictions_backward[::-1] + predictions_forward[1:]
311
+ else:
312
+ predictions = make_animation(source_image, driving_video, generator, kp_detector, he_estimator, relative=opt.relative, adapt_movement_scale=opt.adapt_scale, estimate_jacobian=estimate_jacobian, cpu=opt.cpu, free_view=opt.free_view, yaw=opt.yaw, pitch=opt.pitch, roll=opt.roll)
313
+ imageio.mimsave(opt.result_video, [img_as_ubyte(frame) for frame in predictions], fps=fps)
environment.yaml ADDED
@@ -0,0 +1,106 @@
1
+ name: mesh-video
2
+ channels:
3
+ - pytorch
4
+ - conda-forge
5
+ - defaults
6
+ dependencies:
7
+ - _libgcc_mutex=0.1=main
8
+ - _openmp_mutex=5.1=1_gnu
9
+ - blas=1.0=mkl
10
+ - bzip2=1.0.8=h7b6447c_0
11
+ - ca-certificates=2023.01.10=h06a4308_0
12
+ - certifi=2022.12.7=py38h06a4308_0
13
+ - cudatoolkit=11.3.1=h9edb442_10
14
+ - flit-core=3.8.0=py38h06a4308_0
15
+ - freetype=2.12.1=h4a9f257_0
16
+ - giflib=5.2.1=h5eee18b_3
17
+ - gmp=6.2.1=h295c915_3
18
+ - gnutls=3.6.15=he1e5248_0
19
+ - intel-openmp=2021.4.0=h06a4308_3561
20
+ - jpeg=9e=h5eee18b_1
21
+ - lame=3.100=h7b6447c_0
22
+ - lcms2=2.12=h3be6417_0
23
+ - lerc=3.0=h295c915_0
24
+ - libdeflate=1.17=h5eee18b_0
25
+ - libedit=3.1.20221030=h5eee18b_0
26
+ - libffi=3.2.1=hf484d3e_1007
27
+ - libgcc-ng=11.2.0=h1234567_1
28
+ - libgomp=11.2.0=h1234567_1
29
+ - libidn2=2.3.2=h7f8727e_0
30
+ - libopus=1.3.1=h7b6447c_0
31
+ - libpng=1.6.39=h5eee18b_0
32
+ - libstdcxx-ng=11.2.0=h1234567_1
33
+ - libtasn1=4.19.0=h5eee18b_0
34
+ - libtiff=4.5.0=h6a678d5_2
35
+ - libunistring=0.9.10=h27cfd23_0
36
+ - libuv=1.44.2=h5eee18b_0
37
+ - libvpx=1.7.0=h439df22_0
38
+ - libwebp=1.2.4=h11a3e52_1
39
+ - libwebp-base=1.2.4=h5eee18b_1
40
+ - lz4-c=1.9.4=h6a678d5_0
41
+ - mkl=2021.4.0=h06a4308_640
42
+ - mkl-service=2.4.0=py38h7f8727e_0
43
+ - mkl_fft=1.3.1=py38hd3c417c_0
44
+ - mkl_random=1.2.2=py38h51133e4_0
45
+ - ncurses=6.4=h6a678d5_0
46
+ - nettle=3.7.3=hbbd107a_1
47
+ - numpy-base=1.23.5=py38h31eccc5_0
48
+ - openh264=2.1.1=h4ff587b_0
49
+ - openssl=1.1.1t=h7f8727e_0
50
+ - pillow=9.4.0=py38h6a678d5_0
51
+ - pip=23.0.1=py38h06a4308_0
52
+ - python=3.8.0=h0371630_2
53
+ - pytorch=1.10.1=py3.8_cuda11.3_cudnn8.2.0_0
54
+ - pytorch-mutex=1.0=cuda
55
+ - readline=7.0=h7b6447c_5
56
+ - setuptools=65.6.3=py38h06a4308_0
57
+ - six=1.16.0=pyhd3eb1b0_1
58
+ - sqlite=3.33.0=h62c20be_0
59
+ - tk=8.6.12=h1ccaba5_0
60
+ - torchaudio=0.10.1=py38_cu113
61
+ - torchvision=0.11.2=py38_cu113
62
+ - typing_extensions=4.4.0=py38h06a4308_0
63
+ - wheel=0.38.4=py38h06a4308_0
64
+ - x264=1!157.20191217=h7b6447c_0
65
+ - xz=5.2.10=h5eee18b_1
66
+ - zlib=1.2.13=h5eee18b_0
67
+ - zstd=1.5.4=hc292b87_0
68
+ - pip:
69
+ - cffi==1.14.6
70
+ - cycler==0.10.0
71
+ - decorator==5.1.0
72
+ - face-alignment==1.3.5
73
+ - ffmpeg==1.4
74
+ - imageio==2.9.0
75
+ - imageio-ffmpeg==0.4.5
76
+ - importlib-metadata==6.0.0
77
+ - joblib==1.2.0
78
+ - kiwisolver==1.3.2
79
+ - llvmlite==0.39.1
80
+ - matplotlib==3.4.3
81
+ - networkx==2.6.3
82
+ - numba==0.56.4
83
+ - numpy==1.20.3
84
+ - nvidia-cublas-cu11==11.10.3.66
85
+ - nvidia-cuda-nvrtc-cu11==11.7.99
86
+ - nvidia-cuda-runtime-cu11==11.7.99
87
+ - nvidia-cudnn-cu11==8.5.0.96
88
+ - opencv-python==4.7.0.72
89
+ - pandas==1.3.3
90
+ - pycparser==2.20
91
+ - pyparsing==2.4.7
92
+ - python-dateutil==2.8.2
93
+ - pytube==12.1.3
94
+ - pytz==2021.1
95
+ - pywavelets==1.1.1
96
+ - pyyaml==5.4.1
97
+ - scikit-image==0.18.3
98
+ - scikit-learn==1.0
99
+ - scipy==1.7.1
100
+ - threadpoolctl==3.1.0
101
+ - tifffile==2023.2.28
102
+ - torch==1.13.1
103
+ - tqdm==4.62.3
104
+ - typing-extensions==4.5.0
105
+ - zipp==3.15.0
106
+ prefix: /home/songlc/miniconda3/envs/mesh-video
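A hedged note on reproducing this environment: the trailing `prefix:` points at the author's machine, so recreating the environment by name is the safer route (assuming conda is installed; package resolution may still differ across platforms).

```bash
# Sketch: rebuild the conda environment from the bundled environment.yaml.
# Passing --name overrides the machine-specific 'prefix:' recorded in the file.
conda env create -f environment.yaml --name mesh-video
conda activate mesh-video
```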
frames_dataset.py ADDED
@@ -0,0 +1,280 @@
1
+ #CUDA_VISIBLE_DEVICES=1 python run.py --config log_TH1K/finetune-th1k-spade.yml --device_ids 0 --checkpoint log_TH1K/00000001-checkpoint.pth.tar
2
+ import os
3
+ from skimage import io, img_as_float32
4
+ from skimage.color import gray2rgb
5
+ from sklearn.model_selection import train_test_split
6
+ from imageio import mimread
7
+ from functools import partial
8
+ from skimage.transform import resize
9
+
10
+
11
+ import torch
12
+ import random
13
+ import numpy as np
14
+ from torch.utils.data import Dataset
15
+ import pandas as pd
16
+ from augmentation import AllAugmentationTransform
17
+ import glob
18
+ import math
19
+
20
+ import pickle
21
+ from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels
22
+ from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
23
+
24
+
25
+
26
+ def read_video(name, frame_shape):
27
+ """
28
+ Read video which can be:
29
+ - an image of concatenated frames
30
+ - '.mp4' and '.gif'
31
+ - folder with videos
32
+ """
33
+
34
+ if os.path.isdir(name):
35
+
36
+ frames = sorted(os.listdir(name))
37
+ num_frames = len(frames)
38
+ video_array = np.array(
39
+ [img_as_float32(io.imread(os.path.join(name, frames[idx]))) for idx in range(num_frames)])
40
+ elif name.lower().endswith('.png') or name.lower().endswith('.jpg'):
41
+ image = io.imread(name)
42
+
43
+ if len(image.shape) == 2 or image.shape[2] == 1:
44
+ image = gray2rgb(image)
45
+
46
+ if image.shape[2] == 4:
47
+ image = image[..., :3]
48
+
49
+ image = img_as_float32(image)
50
+
51
+ video_array = np.moveaxis(image, 1, 0)
52
+
53
+ video_array = video_array.reshape((-1,) + frame_shape)
54
+ video_array = np.moveaxis(video_array, 1, 2)
55
+
56
+ elif name.lower().endswith('.gif') or name.lower().endswith('.mp4') or name.lower().endswith('.mov'):
57
+ video = np.array(mimread(name))
58
+ if len(video.shape) == 3:
59
+ video = np.array([gray2rgb(frame) for frame in video])
60
+ if video.shape[-1] == 4:
61
+ video = video[..., :3]
62
+ video_array = img_as_float32(video)
63
+ else:
64
+ raise Exception("Unknown file extensions %s" % name)
65
+
66
+ return video_array
67
+
68
+
69
+ class FramesDataset(Dataset):
70
+ """
71
+ Dataset of videos, each video can be represented as:
72
+ - an image of concatenated frames
73
+ - '.mp4' or '.gif'
74
+ - folder with all frames
75
+ """
76
+
77
+ def __init__(self, root_dir, frame_shape=(256, 256, 3), id_sampling=False, is_train=True,
78
+ random_seed=0, pairs_list=None, augmentation_params=None):
79
+ self.root_dir = root_dir
80
+
81
+ tmp_file = open(root_dir + 'train_file_list.pickle','rb')
82
+ self.train_files_list = pickle.load(tmp_file)
83
+
84
+ self.videos = os.listdir(root_dir)
85
+ self.frame_shape = tuple(frame_shape)
86
+ self.pairs_list = pairs_list
87
+ self.id_sampling = id_sampling
88
+ if os.path.exists(os.path.join(root_dir, 'train')):
89
+ assert os.path.exists(os.path.join(root_dir, 'test'))
90
+ print("Use predefined train-test split.")
91
+ if id_sampling:
92
+ # train_videos = {os.path.basename(video).split('#')[0] for video in
93
+ # os.listdir(os.path.join(root_dir, 'train'))}
94
+ # train_videos = list(train_videos)
95
+ train_videos = list(self.train_files_list.keys())
96
+ else:
97
+ train_videos = os.listdir(os.path.join(root_dir, 'train'))
98
+ test_videos = os.listdir(os.path.join(root_dir, 'test'))
99
+ self.root_dir = os.path.join(self.root_dir, 'train' if is_train else 'test')
100
+ else:
101
+ print("Use random train-test split.")
102
+ train_videos, test_videos = train_test_split(self.videos, random_state=random_seed, test_size=0.2)
103
+
104
+ if is_train:
105
+ self.videos = train_videos
106
+ else:
107
+ self.videos = test_videos
108
+
109
+ self.is_train = is_train
110
+
111
+ if self.is_train:
112
+ self.transform = AllAugmentationTransform(**augmentation_params)
113
+
114
+ #### for degradation ####
115
+
116
+
117
+ self.kernel_range = [2 * v + 1 for v in range(1,3)]
118
+ self.pulse_tensor = torch.zeros(11, 11).float()
119
+ self.pulse_tensor[5, 5] = 1
120
+
121
+ self.resize_range = [0.15, 1.5]
122
+
123
+ # blur settings for the first degradation
124
+ self.blur_kernel_size = 7
125
+ self.kernel_list = ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso']
126
+ self.kernel_prob = [0.45, 0.25, 0.12, 0.03, 0.12, 0.03] # a list for each kernel probability
127
+ self.blur_sigma = [0.1, 0.5]
128
+ self.betag_range = [0.2, 1] # betag used in generalized Gaussian blur kernels
129
+ self.betap_range = [0.5, 1.2] # betap used in plateau blur kernels
130
+ self.sinc_prob = 0.1 # the probability for sinc filters
131
+
132
+ # blur settings for the second degradation
133
+ self.blur_kernel_size2 = 7
134
+ self.kernel_list2 = ['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso']
135
+ self.kernel_prob2 = [0.45, 0.25, 0.12, 0.03, 0.12, 0.03]
136
+ self.blur_sigma2 = [0.1, 0.5]
137
+ self.betag_range2 = [0.2, 1]
138
+ self.betap_range2 = [1, 1.2]
139
+ self.sinc_prob2 = 0.1
140
+ else:
141
+ self.transform = None
142
+
143
+ def __len__(self):
144
+ return len(self.videos)
145
+
146
+ def __getitem__(self, idx):
147
+ if self.is_train and self.id_sampling:
148
+ # name = self.videos[idx]
149
+ # path = np.random.choice(glob.glob(os.path.join(self.root_dir, name + '*.mp4')))
150
+ name = self.videos[idx]
151
+ choice_list = self.train_files_list[name]
152
+ # if len(choice_list) == 0:
153
+ # name = self.videos[idx-1]
154
+ # choice_list = self.train_files_list[name]
155
+ paths = np.random.choice(choice_list)
156
+ else:
157
+ name = self.videos[idx]
158
+ paths = os.path.join(self.root_dir, name)
159
+
160
+ video_name = os.path.basename(paths)
161
+ if self.is_train and os.path.isdir(paths):
162
+ frames = os.listdir(paths)
163
+ num_frames = len(frames)
164
+ frame_idx = np.sort(np.random.choice(num_frames, replace=True, size=2))
165
+
166
+
167
+ if self.frame_shape is not None:
168
+ resize_fn = partial(resize, output_shape=self.frame_shape)
169
+ else:
170
+ resize_fn = img_as_float32
171
+ video_array = [resize_fn(img_as_float32(io.imread(paths + '/' + '%06d.jpg'%(idx) ))) for idx in frame_idx]
172
+
173
+
174
+ else:
175
+ video_array = read_video(paths, frame_shape=self.frame_shape)
176
+ num_frames = len(video_array)
177
+ frame_idx = np.sort(np.random.choice(num_frames, replace=True, size=2)) if self.is_train else range(
178
+ num_frames)
179
+ video_array = video_array[frame_idx]
180
+
181
+ if self.transform is not None:
182
+ video_array = self.transform(video_array)
183
+
184
+ out = {}
185
+ if self.is_train:
186
+ source = np.array(video_array[0], dtype='float32')
187
+ driving = np.array(video_array[1], dtype='float32')
188
+ out['driving'] = driving.transpose((2, 0, 1))
189
+ out['source'] = source.transpose((2, 0, 1))
190
+
191
+ # if self.degradation:
192
+ ############ run degradation ############
193
+ # ---- Generate kernels (used in the first degradation) ---- #
194
+ kernel_size = random.choice(self.kernel_range)
195
+ if np.random.uniform() < 0.1:  # note: hard-coded rather than using self.sinc_prob
196
+ # choose the sinc cutoff range by kernel size (with kernel_range = [3, 5] here, the first branch below is always taken)
197
+ if kernel_size < 11:
198
+ omega_c = np.random.uniform(np.pi / 3, np.pi)
199
+ else:
200
+ omega_c = np.random.uniform(np.pi / 5, np.pi)
201
+ kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
202
+ else:
203
+ kernel = random_mixed_kernels(
204
+ self.kernel_list,
205
+ self.kernel_prob,
206
+ kernel_size,
207
+ self.blur_sigma,
208
+ self.blur_sigma, [-math.pi, math.pi],
209
+ self.betag_range,
210
+ self.betap_range,
211
+ noise_range=None)
212
+ # pad kernel to 21 x 21
213
+ pad_size = (21 - kernel_size) // 2
214
+ kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
215
+
216
+
217
+ # ---- Generate kernels (used in the second degradation) ---- #
218
+ kernel_size = random.choice(self.kernel_range)
219
+ if np.random.uniform() < 0.1:
220
+ if kernel_size < 13:
221
+ omega_c = np.random.uniform(np.pi / 3, np.pi)
222
+ else:
223
+ omega_c = np.random.uniform(np.pi / 5, np.pi)
224
+ kernel2 = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
225
+ else:
226
+ kernel2 = random_mixed_kernels(
227
+ self.kernel_list2,
228
+ self.kernel_prob2,
229
+ kernel_size,
230
+ self.blur_sigma2,
231
+ self.blur_sigma2, [-math.pi, math.pi],
232
+ self.betag_range2,
233
+ self.betap_range2,
234
+ noise_range=None)
235
+ # pad kernel to 21 x 21
236
+ pad_size = (21 - kernel_size) // 2
237
+ kernel2 = np.pad(kernel2, ((pad_size, pad_size), (pad_size, pad_size)))
238
+
239
+ # ---- the final sinc kernel ---- #
240
+ if np.random.uniform() < 0.8:
241
+ kernel_size = random.choice(self.kernel_range)
242
+ omega_c = np.random.uniform(np.pi / 3, np.pi)
243
+ sinc_kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=11)
244
+ sinc_kernel = torch.FloatTensor(sinc_kernel)
245
+ else:
246
+ sinc_kernel = self.pulse_tensor
247
+
248
+ # convert the sampled kernels from numpy arrays to tensors (the img2tensor call below is kept commented out)
249
+ # img_gt = img2tensor([img_gt], bgr2rgb=True, float32=True)[0]
250
+ kernel = torch.FloatTensor(kernel)
251
+ kernel2 = torch.FloatTensor(kernel2)
252
+ #########################################
253
+
254
+ out['kernel'] = kernel
255
+ out['kernel2']= kernel2
256
+ out['sinc_kernel'] = sinc_kernel
257
+
258
+ else:
259
+ video = np.array(video_array, dtype='float32')
260
+ out['video'] = video.transpose((3, 0, 1, 2))
261
+
262
+ out['name'] = video_name
263
+
264
+ return out
265
+
266
+
267
+ class DatasetRepeater(Dataset):
268
+ """
269
+ Pass over the same dataset several times per epoch for better I/O performance
270
+ """
271
+
272
+ def __init__(self, dataset, num_repeats=100):
273
+ self.dataset = dataset
274
+ self.num_repeats = num_repeats
275
+
276
+ def __len__(self):
277
+ return self.num_repeats * self.dataset.__len__()
278
+
279
+ def __getitem__(self, idx):
280
+ return self.dataset[idx % self.dataset.__len__()]
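Note on usage: __getitem__ above returns a dict whose keys differ by split — training samples carry a 'source'/'driving' frame pair plus the three sampled degradation kernels, while test samples carry the full 'video'. Below is a minimal, hypothetical sketch of how these classes are typically wired into a DataLoader; the directory path, constructor values and augmentation keys are assumptions, not taken from this repo's training script (the real values live in config/mix-resolution.yml).

from torch.utils.data import DataLoader
from frames_dataset import FramesDataset, DatasetRepeater

# Assumed constructor values; argument names follow the fields used above.
dataset = FramesDataset(root_dir='data/my_frames',             # hypothetical dataset root with train/ and test/ subfolders
                        frame_shape=(256, 256, 3),
                        is_train=True,
                        id_sampling=False,                     # id_sampling=True additionally requires train_files_list, built elsewhere
                        augmentation_params={'flip_param': {'horizontal_flip': True, 'time_flip': True}})  # assumed keys

loader = DataLoader(DatasetRepeater(dataset, num_repeats=100), batch_size=4, shuffle=True, num_workers=4)

batch = next(iter(loader))
# batch['source'], batch['driving']  -> (B, 3, H, W) float tensors
# batch['kernel'], batch['kernel2']  -> (B, 21, 21) blur kernels for the two degradation stages
# batch['sinc_kernel']               -> (B, 11, 11) final sinc (or pulse) kernel
# batch['name']                      -> list of video names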
logger.py ADDED
@@ -0,0 +1,194 @@
1
+ import numpy as np
2
+ import torch
3
+ import torch.nn.functional as F
4
+ import imageio
5
+
6
+ import os
7
+ from skimage.draw import circle  # note: requires scikit-image < 0.19, where circle has not yet been replaced by disk
8
+
9
+ import matplotlib.pyplot as plt
10
+ import collections
11
+
12
+
13
+ class Logger:
14
+ def __init__(self, log_dir, checkpoint_freq=100, visualizer_params=None, zfill_num=8, log_file_name='log.txt'):
15
+
16
+ self.loss_list = []
17
+ self.cpk_dir = log_dir
18
+ self.visualizations_dir = os.path.join(log_dir, 'train-vis')
19
+ if not os.path.exists(self.visualizations_dir):
20
+ os.makedirs(self.visualizations_dir)
21
+ self.log_file = open(os.path.join(log_dir, log_file_name), 'a')
22
+ self.zfill_num = zfill_num
23
+ self.visualizer = Visualizer(**visualizer_params)
24
+ self.checkpoint_freq = checkpoint_freq
25
+ self.epoch = 0
26
+ self.best_loss = float('inf')
27
+ self.names = None
28
+
29
+ def log_scores(self, loss_names):
30
+ loss_mean = np.array(self.loss_list).mean(axis=0)
31
+
32
+ loss_string = "; ".join(["%s - %.5f" % (name, value) for name, value in zip(loss_names, loss_mean)])
33
+ loss_string = str(self.epoch).zfill(self.zfill_num) + ") " + loss_string
34
+
35
+ print(loss_string, file=self.log_file)
36
+ self.loss_list = []
37
+ self.log_file.flush()
38
+
39
+ def visualize_rec(self, inp, out):
40
+ image = self.visualizer.visualize(inp['driving'], inp['source'], out)
41
+ imageio.imsave(os.path.join(self.visualizations_dir, "%s-rec.png" % str(self.epoch).zfill(self.zfill_num)), image)
42
+
43
+ def save_cpk(self, emergent=False):
44
+ cpk = {k: v.state_dict() for k, v in self.models.items()}
45
+ cpk['epoch'] = self.epoch
46
+ cpk_path = os.path.join(self.cpk_dir, '%s-checkpoint.pth.tar' % str(self.epoch + 1).zfill(self.zfill_num))
47
+ if not (os.path.exists(cpk_path) and emergent):
48
+ torch.save(cpk, cpk_path)
49
+
50
+ @staticmethod
51
+ def load_cpk(checkpoint_path, generator=None, discriminator=None, kp_detector=None, he_estimator=None,
52
+ optimizer_generator=None, optimizer_discriminator=None, optimizer_kp_detector=None, optimizer_he_estimator=None):
53
+ checkpoint = torch.load(checkpoint_path)
54
+ if generator is not None:
55
+ generator.load_state_dict(checkpoint['generator'])
56
+ if kp_detector is not None:
57
+ kp_detector.load_state_dict(checkpoint['kp_detector'])
58
+ if he_estimator is not None:
59
+ he_estimator.load_state_dict(checkpoint['he_estimator'])
60
+ if discriminator is not None:
61
+ try:
62
+ discriminator.load_state_dict(checkpoint['discriminator'])
63
+ except:
64
+ print('No discriminator in the state-dict. Discriminator will be randomly initialized')
65
+ if optimizer_generator is not None:
66
+ optimizer_generator.load_state_dict(checkpoint['optimizer_generator'])
67
+ if optimizer_discriminator is not None:
68
+ try:
69
+ optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
70
+ except (KeyError, RuntimeError):  # a missing key raises KeyError; RuntimeError covers parameter-group mismatches
70
+ print('No discriminator optimizer in the state-dict. Optimizer will not be initialized')
72
+ if optimizer_kp_detector is not None:
73
+ optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector'])
74
+ if optimizer_he_estimator is not None:
75
+ optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator'])
76
+
77
+ return checkpoint['epoch']
78
+
79
+ def __enter__(self):
80
+ return self
81
+
82
+ def __exit__(self, exc_type, exc_val, exc_tb):
83
+ if 'models' in self.__dict__:
84
+ self.save_cpk()
85
+ self.log_file.close()
86
+
87
+ def log_iter(self, losses):
88
+ losses = collections.OrderedDict(losses.items())
89
+ if self.names is None:
90
+ self.names = list(losses.keys())
91
+ self.loss_list.append(list(losses.values()))
92
+
93
+ def log_epoch(self, epoch, models, inp, out):
94
+ self.epoch = epoch
95
+ self.models = models
96
+ if (self.epoch + 1) % self.checkpoint_freq == 0:
97
+ self.save_cpk()
98
+ self.log_scores(self.names)
99
+ self.visualize_rec(inp, out)
100
+
101
+
102
+ class Visualizer:
103
+ def __init__(self, kp_size=5, draw_border=False, colormap='gist_rainbow'):
104
+ self.kp_size = kp_size
105
+ self.draw_border = draw_border
106
+ self.colormap = plt.get_cmap(colormap)
107
+
108
+ def draw_image_with_kp(self, image, kp_array):
109
+ image = np.copy(image)
110
+ spatial_size = np.array(image.shape[:2][::-1])[np.newaxis]
111
+ kp_array = spatial_size * (kp_array + 1) / 2
112
+ num_kp = kp_array.shape[0]
113
+ for kp_ind, kp in enumerate(kp_array):
114
+ rr, cc = circle(kp[1], kp[0], self.kp_size, shape=image.shape[:2])
115
+ image[rr, cc] = np.array(self.colormap(kp_ind / num_kp))[:3]
116
+ return image
117
+
118
+ def create_image_column_with_kp(self, images, kp):
119
+ image_array = np.array([self.draw_image_with_kp(v, k) for v, k in zip(images, kp)])
120
+ return self.create_image_column(image_array)
121
+
122
+ def create_image_column(self, images):
123
+ if self.draw_border:
124
+ images = np.copy(images)
125
+ images[:, :, [0, -1]] = (1, 1, 1)
126
+ images[:, :, [0, -1]] = (1, 1, 1)
127
+ return np.concatenate(list(images), axis=0)
128
+
129
+ def create_image_grid(self, *args):
130
+ out = []
131
+ for arg in args:
132
+ if type(arg) == tuple:
133
+ out.append(self.create_image_column_with_kp(arg[0], arg[1]))
134
+ else:
135
+ out.append(self.create_image_column(arg))
136
+ return np.concatenate(out, axis=1)
137
+
138
+ def visualize(self, driving, source, out):
139
+ images = []
140
+
141
+ # Source image with keypoints
142
+ source = source.data.cpu()
143
+ kp_source = out['kp_source']['value'][:, :, :2].data.cpu().numpy() # 3d -> 2d
144
+ source = np.transpose(source, [0, 2, 3, 1])
145
+ images.append((source, kp_source))
146
+
147
+ # Equivariance visualization
148
+ if 'transformed_frame' in out:
149
+ transformed = out['transformed_frame'].data.cpu().numpy()
150
+ transformed = np.transpose(transformed, [0, 2, 3, 1])
151
+ transformed_kp = out['transformed_kp']['value'][:, :, :2].data.cpu().numpy() # 3d -> 2d
152
+ images.append((transformed, transformed_kp))
153
+
154
+ # Driving image with keypoints
155
+ kp_driving = out['kp_driving']['value'][:, :, :2].data.cpu().numpy() # 3d -> 2d
156
+ driving = driving.data.cpu().numpy()
157
+ driving = np.transpose(driving, [0, 2, 3, 1])
158
+ images.append((driving, kp_driving))
159
+
160
+ # Result
161
+ prediction = out['prediction'].data.cpu().numpy()
162
+ prediction = np.transpose(prediction, [0, 2, 3, 1])
163
+ images.append(prediction)
164
+
165
+ ## Occlusion map
166
+ if 'occlusion_map' in out:
167
+ occlusion_map = out['occlusion_map'].data.cpu().repeat(1, 3, 1, 1)
168
+ occlusion_map = F.interpolate(occlusion_map, size=source.shape[1:3]).numpy()
169
+ occlusion_map = np.transpose(occlusion_map, [0, 2, 3, 1])
170
+ images.append(occlusion_map)
171
+
172
+ ## Mask
173
+ if 'mask' in out:
174
+ for i in range(out['mask'].shape[1]):
175
+ mask = out['mask'][:, i:(i+1)].data.cpu().sum(2).repeat(1, 3, 1, 1) # (n, 3, h, w)
176
+ # mask = F.softmax(mask.view(mask.shape[0], mask.shape[1], -1), dim=2).view(mask.shape)
177
+ mask = F.interpolate(mask, size=source.shape[1:3]).numpy()
178
+ mask = np.transpose(mask, [0, 2, 3, 1])
179
+
180
+ if i != 0:
181
+ color = np.array(self.colormap((i - 1) / (out['mask'].shape[1] - 1)))[:3]
182
+ else:
183
+ color = np.array((0, 0, 0))
184
+
185
+ color = color.reshape((1, 1, 1, 3))
186
+
187
+ if i != 0:
188
+ images.append(mask * color)
189
+ else:
190
+ images.append(mask)
191
+
192
+ image = self.create_image_grid(*images)
193
+ image = (255 * image).astype(np.uint8)
194
+ return image
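Note on usage: Logger above is written to be used as a context manager around the training loop — log_iter buffers per-iteration losses, and log_epoch flushes the running means to log.txt, renders a keypoint/prediction grid via Visualizer, and saves a checkpoint every checkpoint_freq epochs. A minimal sketch, assuming the caller already has the models, optimizers, a dataloader and a hypothetical train_step function that returns a losses dict and the generator output dict ('kp_source', 'kp_driving', 'prediction', ...):

from logger import Logger

with Logger(log_dir='log/experiment', checkpoint_freq=50,
            visualizer_params={'kp_size': 5, 'draw_border': True, 'colormap': 'gist_rainbow'}) as logger:
    for epoch in range(num_epochs):                              # num_epochs, dataloader, models: defined by the caller
        for x in dataloader:                                     # x contains 'source' and 'driving' tensors
            losses, generated = train_step(x)                    # hypothetical per-iteration step
            logger.log_iter({k: float(v) for k, v in losses.items()})
        logger.log_epoch(epoch,
                         {'generator': generator, 'kp_detector': kp_detector,
                          'optimizer_generator': optimizer_generator},
                         inp=x, out=generated)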
media/Teaser_video.png ADDED

Git LFS Details

  • SHA256: 6cf2f2d8a726fdde33c511054cb6ebb83c1727608c1f7bcfa0514693b095cc26
  • Pointer size: 132 Bytes
  • Size of remote file: 2.09 MB
modules/__pycache__/dense_motion.cpython-36.pyc ADDED
Binary file (3.86 kB). View file
 
modules/__pycache__/dense_motion.cpython-37.pyc ADDED
Binary file (3.82 kB). View file
 
modules/__pycache__/dense_motion.cpython-38.pyc ADDED
Binary file (3.81 kB). View file
 
modules/__pycache__/discriminator.cpython-36.pyc ADDED
Binary file (3.22 kB). View file
 
modules/__pycache__/discriminator.cpython-37.pyc ADDED
Binary file (3.21 kB). View file
 
modules/__pycache__/discriminator.cpython-38.pyc ADDED
Binary file (3.18 kB). View file
 
modules/__pycache__/generator.cpython-36.pyc ADDED
Binary file (7.01 kB). View file
 
modules/__pycache__/generator.cpython-37.pyc ADDED
Binary file (6.86 kB). View file
 
modules/__pycache__/generator.cpython-38.pyc ADDED
Binary file (6.65 kB). View file
 
modules/__pycache__/hopenet.cpython-36.pyc ADDED
Binary file (5.17 kB). View file
 
modules/__pycache__/hopenet.cpython-37.pyc ADDED
Binary file (5.13 kB). View file
 
modules/__pycache__/hopenet.cpython-38.pyc ADDED
Binary file (4.92 kB). View file
 
modules/__pycache__/keypoint_detector.cpython-36.pyc ADDED
Binary file (4.82 kB). View file
 
modules/__pycache__/keypoint_detector.cpython-37.pyc ADDED
Binary file (4.79 kB). View file
 
modules/__pycache__/keypoint_detector.cpython-38.pyc ADDED
Binary file (4.78 kB). View file
 
modules/__pycache__/model.cpython-36.pyc ADDED
Binary file (13 kB). View file
 
modules/__pycache__/model.cpython-37.pyc ADDED
Binary file (15.3 kB). View file
 
modules/__pycache__/model.cpython-38.pyc ADDED
Binary file (15.3 kB). View file
 
modules/__pycache__/util.cpython-36.pyc ADDED
Binary file (15.5 kB). View file
 
modules/__pycache__/util.cpython-37.pyc ADDED
Binary file (15.4 kB). View file
 
modules/__pycache__/util.cpython-38.pyc ADDED
Binary file (14.5 kB). View file
 
modules/dense_motion.py ADDED
@@ -0,0 +1,128 @@
1
+ from torch import nn
2
+ import torch.nn.functional as F
3
+ import torch
4
+ from modules.util import Hourglass, make_coordinate_grid, kp2gaussian
5
+
6
+ from sync_batchnorm import SynchronizedBatchNorm3d as BatchNorm3d
7
+
8
+
9
+ class DenseMotionNetwork(nn.Module):
10
+ """
11
+ Module that predicts a dense motion field from the sparse motion representation given by kp_source and kp_driving
12
+ """
13
+
14
+ def __init__(self, block_expansion, num_blocks, max_features, num_kp, feature_channel, reshape_depth, compress,
15
+ estimate_occlusion_map=False):
16
+ super(DenseMotionNetwork, self).__init__()
17
+ # self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(feature_channel+1), max_features=max_features, num_blocks=num_blocks)
18
+ self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(compress+1), max_features=max_features, num_blocks=num_blocks)
19
+
20
+ self.mask = nn.Conv3d(self.hourglass.out_filters, num_kp + 1, kernel_size=7, padding=3)
21
+
22
+ self.compress = nn.Conv3d(feature_channel, compress, kernel_size=1)
23
+ self.norm = BatchNorm3d(compress, affine=True)
24
+
25
+ if estimate_occlusion_map:
26
+ # self.occlusion = nn.Conv2d(reshape_channel*reshape_depth, 1, kernel_size=7, padding=3)
27
+ self.occlusion = nn.Conv2d(self.hourglass.out_filters*reshape_depth, 1, kernel_size=7, padding=3)
28
+ else:
29
+ self.occlusion = None
30
+
31
+ self.num_kp = num_kp
32
+
33
+
34
+ def create_sparse_motions(self, feature, kp_driving, kp_source):
35
+ bs, _, d, h, w = feature.shape
36
+ identity_grid = make_coordinate_grid((d, h, w), type=kp_source['value'].type())
37
+ identity_grid = identity_grid.view(1, 1, d, h, w, 3)
38
+ coordinate_grid = identity_grid - kp_driving['value'].view(bs, self.num_kp, 1, 1, 1, 3)
39
+
40
+ k = coordinate_grid.shape[1]
41
+
42
+ # if 'jacobian' in kp_driving:
43
+ if 'jacobian' in kp_driving and kp_driving['jacobian'] is not None:
44
+ jacobian = torch.matmul(kp_source['jacobian'], torch.inverse(kp_driving['jacobian']))
45
+ jacobian = jacobian.unsqueeze(-3).unsqueeze(-3).unsqueeze(-3)
46
+ jacobian = jacobian.repeat(1, 1, d, h, w, 1, 1)
47
+ coordinate_grid = torch.matmul(jacobian, coordinate_grid.unsqueeze(-1))
48
+ coordinate_grid = coordinate_grid.squeeze(-1)
49
+ '''
50
+ if 'rot' in kp_driving:
51
+ rot_s = kp_source['rot']
52
+ rot_d = kp_driving['rot']
53
+ rot = torch.einsum('bij, bjk->bki', rot_s, torch.inverse(rot_d))
54
+ rot = rot.unsqueeze(-3).unsqueeze(-3).unsqueeze(-3).unsqueeze(-3)
55
+ rot = rot.repeat(1, k, d, h, w, 1, 1)
56
+ # print(rot.shape)
57
+ coordinate_grid = torch.matmul(rot, coordinate_grid.unsqueeze(-1))
58
+ coordinate_grid = coordinate_grid.squeeze(-1)
59
+ # print(coordinate_grid.shape)
60
+ '''
61
+ driving_to_source = coordinate_grid + kp_source['value'].view(bs, self.num_kp, 1, 1, 1, 3) # (bs, num_kp, d, h, w, 3)
62
+
63
+ # add the background (identity grid) motion component
64
+ identity_grid = identity_grid.repeat(bs, 1, 1, 1, 1, 1)
65
+ sparse_motions = torch.cat([identity_grid, driving_to_source], dim=1)
66
+
67
+ # sparse_motions = driving_to_source
68
+
69
+ return sparse_motions
70
+
71
+ def create_deformed_feature(self, feature, sparse_motions):
72
+ bs, _, d, h, w = feature.shape
73
+ feature_repeat = feature.unsqueeze(1).unsqueeze(1).repeat(1, self.num_kp+1, 1, 1, 1, 1, 1) # (bs, num_kp+1, 1, c, d, h, w)
74
+ feature_repeat = feature_repeat.view(bs * (self.num_kp+1), -1, d, h, w) # (bs*(num_kp+1), c, d, h, w)
75
+ sparse_motions = sparse_motions.view((bs * (self.num_kp+1), d, h, w, -1)) # (bs*(num_kp+1), d, h, w, 3)
76
+ sparse_deformed = F.grid_sample(feature_repeat, sparse_motions)
77
+ sparse_deformed = sparse_deformed.view((bs, self.num_kp+1, -1, d, h, w)) # (bs, num_kp+1, c, d, h, w)
78
+ return sparse_deformed
79
+
80
+ def create_heatmap_representations(self, feature, kp_driving, kp_source):
81
+ spatial_size = feature.shape[3:]
82
+ gaussian_driving = kp2gaussian(kp_driving, spatial_size=spatial_size, kp_variance=0.01)
83
+ gaussian_source = kp2gaussian(kp_source, spatial_size=spatial_size, kp_variance=0.01)
84
+ heatmap = gaussian_driving - gaussian_source
85
+
86
+ # adding background feature
87
+ zeros = torch.zeros(heatmap.shape[0], 1, spatial_size[0], spatial_size[1], spatial_size[2]).type(heatmap.type())
88
+ heatmap = torch.cat([zeros, heatmap], dim=1)
89
+ heatmap = heatmap.unsqueeze(2) # (bs, num_kp+1, 1, d, h, w)
90
+ return heatmap
91
+
92
+ def forward(self, feature, kp_driving, kp_source):
93
+ bs, _, d, h, w = feature.shape
94
+
95
+ feature = self.compress(feature)
96
+ feature = self.norm(feature)
97
+ feature = F.relu(feature)
98
+
99
+ out_dict = dict()
100
+ sparse_motion = self.create_sparse_motions(feature, kp_driving, kp_source)
101
+ deformed_feature = self.create_deformed_feature(feature, sparse_motion)
102
+
103
+ heatmap = self.create_heatmap_representations(deformed_feature, kp_driving, kp_source)
104
+
105
+ input = torch.cat([heatmap, deformed_feature], dim=2)
106
+ input = input.view(bs, -1, d, h, w)
107
+
108
+ # input = deformed_feature.view(bs, -1, d, h, w) # (bs, num_kp+1 * c, d, h, w)
109
+
110
+ prediction = self.hourglass(input)
111
+
112
+ mask = self.mask(prediction)
113
+ mask = F.softmax(mask, dim=1)
114
+ out_dict['mask'] = mask
115
+ mask = mask.unsqueeze(2) # (bs, num_kp+1, 1, d, h, w)
116
+ sparse_motion = sparse_motion.permute(0, 1, 5, 2, 3, 4) # (bs, num_kp+1, 3, d, h, w)
117
+ deformation = (sparse_motion * mask).sum(dim=1) # (bs, 3, d, h, w)
118
+ deformation = deformation.permute(0, 2, 3, 4, 1) # (bs, d, h, w, 3)
119
+
120
+ out_dict['deformation'] = deformation
121
+
122
+ if self.occlusion:
123
+ bs, c, d, h, w = prediction.shape
124
+ prediction = prediction.view(bs, -1, h, w)
125
+ occlusion_map = torch.sigmoid(self.occlusion(prediction))
126
+ out_dict['occlusion_map'] = occlusion_map
127
+
128
+ return out_dict
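To make the tensor shapes in forward concrete, here is a small shape-check sketch with random inputs; the hyperparameter values are illustrative, not this repo's config (see config/mix-resolution.yml for the real ones).

import torch
from modules.dense_motion import DenseMotionNetwork

net = DenseMotionNetwork(block_expansion=32, num_blocks=5, max_features=1024,
                         num_kp=15, feature_channel=32, reshape_depth=16,
                         compress=4, estimate_occlusion_map=True)

bs, d, h, w = 2, 16, 64, 64                                    # reshape_depth must match the feature depth d
feature = torch.randn(bs, 32, d, h, w)                         # (bs, feature_channel, d, h, w) appearance volume
make_kp = lambda: {'value': torch.rand(bs, 15, 3) * 2 - 1,     # keypoints in [-1, 1]^3
                   'jacobian': None}                           # no per-keypoint jacobians in this sketch

out = net(feature, kp_driving=make_kp(), kp_source=make_kp())
print(out['mask'].shape)            # (bs, num_kp + 1, d, h, w) soft assignment of each location to a motion
print(out['deformation'].shape)     # (bs, d, h, w, 3) sampling grid that warps source features to the driving pose
print(out['occlusion_map'].shape)   # (bs, 1, h, w)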