imseldrith committed
Commit • 1d10d42
1 Parent(s): 61e0dab

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
- .assets/models/GFPGANv1.4.pth +3 -0
- .assets/models/RealESRGAN_x4plus.pth +3 -0
- .assets/models/inswapper_128.onnx +3 -0
- .editorconfig +8 -0
- .flake8 +3 -0
- .gitattributes +1 -0
- .github/FUNDING.yml +2 -0
- .github/preview.png +3 -0
- .github/workflows/ci.yml +34 -0
- .gitignore +4 -0
- 1685074910001_vtqikl_2_0-boy-dp-image-77-720x704.jpg +0 -0
- 1685074910001_vtqikl_2_0-images 2.jpeg +0 -0
- 1685074910001_vtqikl_2_0-images.jpg +0 -0
- DeepFakeAI/__init__.py +0 -0
- DeepFakeAI/__pycache__/__init__.cpython-310.pyc +0 -0
- DeepFakeAI/__pycache__/capturer.cpython-310.pyc +0 -0
- DeepFakeAI/__pycache__/choices.cpython-310.pyc +0 -0
- DeepFakeAI/__pycache__/core.cpython-310.pyc +0 -0
- DeepFakeAI/__pycache__/face_analyser.cpython-310.pyc +0 -0
- DeepFakeAI/__pycache__/face_reference.cpython-310.pyc +0 -0
- DeepFakeAI/__pycache__/globals.cpython-310.pyc +0 -0
- DeepFakeAI/__pycache__/metadata.cpython-310.pyc +0 -0
- DeepFakeAI/__pycache__/predictor.cpython-310.pyc +0 -0
- DeepFakeAI/__pycache__/typing.cpython-310.pyc +0 -0
- DeepFakeAI/__pycache__/utilities.cpython-310.pyc +0 -0
- DeepFakeAI/__pycache__/wording.cpython-310.pyc +0 -0
- DeepFakeAI/capturer.py +22 -0
- DeepFakeAI/choices.py +10 -0
- DeepFakeAI/core.py +222 -0
- DeepFakeAI/face_analyser.py +106 -0
- DeepFakeAI/face_reference.py +21 -0
- DeepFakeAI/globals.py +30 -0
- DeepFakeAI/metadata.py +13 -0
- DeepFakeAI/predictor.py +43 -0
- DeepFakeAI/processors/__init__.py +0 -0
- DeepFakeAI/processors/__pycache__/__init__.cpython-310.pyc +0 -0
- DeepFakeAI/processors/frame/__init__.py +0 -0
- DeepFakeAI/processors/frame/__pycache__/__init__.cpython-310.pyc +0 -0
- DeepFakeAI/processors/frame/__pycache__/core.cpython-310.pyc +0 -0
- DeepFakeAI/processors/frame/core.py +113 -0
- DeepFakeAI/processors/frame/modules/__init__.py +0 -0
- DeepFakeAI/processors/frame/modules/__pycache__/__init__.cpython-310.pyc +0 -0
- DeepFakeAI/processors/frame/modules/__pycache__/face_enhancer.cpython-310.pyc +0 -0
- DeepFakeAI/processors/frame/modules/__pycache__/face_swapper.cpython-310.pyc +0 -0
- DeepFakeAI/processors/frame/modules/__pycache__/frame_enhancer.cpython-310.pyc +0 -0
- DeepFakeAI/processors/frame/modules/face_enhancer.py +100 -0
- DeepFakeAI/processors/frame/modules/face_swapper.py +105 -0
- DeepFakeAI/processors/frame/modules/frame_enhancer.py +88 -0
- DeepFakeAI/typing.py +13 -0
- DeepFakeAI/uis/__init__.py +0 -0
.assets/models/GFPGANv1.4.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2cd4703ab14f4d01fd1383a8a8b266f9a5833dacee8e6a79d3bf21a1b6be5ad
+size 348632874
.assets/models/RealESRGAN_x4plus.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fa0d38905f75ac06eb49a7951b426670021be3018265fd191d2125df9d682f1
+size 67040989
.assets/models/inswapper_128.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4a3f08c753cb72d04e10aa0f7dbe3deebbf39567d4ead6dce08e98aa49e16af
+size 554253681
.editorconfig
ADDED
@@ -0,0 +1,8 @@
+root = true
+
+[*]
+end_of_line = lf
+insert_final_newline = true
+indent_size = 4
+indent_style = tab
+trim_trailing_whitespace = true
.flake8
ADDED
@@ -0,0 +1,3 @@
+[flake8]
+select = E3, E4, F
+per-file-ignores = facefusion/core.py:E402,F401
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+.github/preview.png filter=lfs diff=lfs merge=lfs -text
.github/FUNDING.yml
ADDED
@@ -0,0 +1,2 @@
+github: henryruhs
+custom: https://paypal.me/henryruhs
.github/preview.png
ADDED
Git LFS Details
.github/workflows/ci.yml
ADDED
@@ -0,0 +1,34 @@
+name: ci
+
+on: [ push, pull_request ]
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+    steps:
+    - name: Checkout
+      uses: actions/checkout@v2
+    - name: Set up Python 3.10
+      uses: actions/setup-python@v2
+      with:
+        python-version: '3.10'
+    - run: pip install flake8
+    - run: pip install mypy
+    - run: flake8 run.py facefusion tests
+    - run: mypy run.py facefusion tests
+  test:
+    strategy:
+      matrix:
+        os: [ macos-latest, ubuntu-latest, windows-latest ]
+    runs-on: ${{ matrix.os }}
+    steps:
+    - name: Checkout
+      uses: actions/checkout@v2
+    - name: Set up ffmpeg
+      uses: FedericoCarboni/setup-ffmpeg@v2
+    - name: Set up Python 3.10
+      uses: actions/setup-python@v2
+      with:
+        python-version: '3.10'
+    - run: pip install -r requirements-ci.txt
+    - run: pytest
.gitignore
ADDED
@@ -0,0 +1,4 @@
+.assets
+.idea
+.vscode
+
1685074910001_vtqikl_2_0-boy-dp-image-77-720x704.jpg
ADDED
1685074910001_vtqikl_2_0-images 2.jpeg
ADDED
1685074910001_vtqikl_2_0-images.jpg
ADDED
DeepFakeAI/__init__.py
ADDED
File without changes
DeepFakeAI/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (135 Bytes)
DeepFakeAI/__pycache__/capturer.cpython-310.pyc
ADDED
Binary file (865 Bytes)
DeepFakeAI/__pycache__/choices.cpython-310.pyc
ADDED
Binary file (877 Bytes)
DeepFakeAI/__pycache__/core.cpython-310.pyc
ADDED
Binary file (8.87 kB)
DeepFakeAI/__pycache__/face_analyser.cpython-310.pyc
ADDED
Binary file (3.86 kB)
DeepFakeAI/__pycache__/face_reference.cpython-310.pyc
ADDED
Binary file (647 Bytes)
DeepFakeAI/__pycache__/globals.cpython-310.pyc
ADDED
Binary file (1.47 kB)
DeepFakeAI/__pycache__/metadata.cpython-310.pyc
ADDED
Binary file (462 Bytes)
DeepFakeAI/__pycache__/predictor.cpython-310.pyc
ADDED
Binary file (1.66 kB)
DeepFakeAI/__pycache__/typing.cpython-310.pyc
ADDED
Binary file (674 Bytes)
DeepFakeAI/__pycache__/utilities.cpython-310.pyc
ADDED
Binary file (7.87 kB)
DeepFakeAI/__pycache__/wording.cpython-310.pyc
ADDED
Binary file (5.41 kB)
DeepFakeAI/capturer.py
ADDED
@@ -0,0 +1,22 @@
+from typing import Optional
+import cv2
+
+from facefusion.typing import Frame
+
+
+def get_video_frame(video_path : str, frame_number : int = 0) -> Optional[Frame]:
+    capture = cv2.VideoCapture(video_path)
+    frame_total = capture.get(cv2.CAP_PROP_FRAME_COUNT)
+    capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, frame_number - 1))
+    has_frame, frame = capture.read()
+    capture.release()
+    if has_frame:
+        return frame
+    return None
+
+
+def get_video_frame_total(video_path : str) -> int:
+    capture = cv2.VideoCapture(video_path)
+    video_frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
+    capture.release()
+    return video_frame_total
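A minimal usage sketch for the capturer module above (not part of this commit; it assumes the package is importable as facefusion, matching the module's own imports, and uses placeholder file paths):

import cv2

from facefusion.capturer import get_video_frame, get_video_frame_total

# placeholder input path, used only for illustration
video_path = 'target.mp4'

# total frame count as reported by OpenCV for this video
frame_total = get_video_frame_total(video_path)

# grab a preview frame from the middle of the clip; returns None if the read fails
preview_frame = get_video_frame(video_path, frame_total // 2)
if preview_frame is not None:
    cv2.imwrite('preview.png', preview_frame)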
DeepFakeAI/choices.py
ADDED
@@ -0,0 +1,10 @@
+from typing import List
+
+from facefusion.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, OutputVideoEncoder
+
+face_recognition : List[FaceRecognition] = [ 'reference', 'many' ]
+face_analyser_direction : List[FaceAnalyserDirection] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small']
+face_analyser_age : List[FaceAnalyserAge] = [ 'child', 'teen', 'adult', 'senior' ]
+face_analyser_gender : List[FaceAnalyserGender] = [ 'male', 'female' ]
+temp_frame_format : List[TempFrameFormat] = [ 'jpg', 'png' ]
+output_video_encoder : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ]
DeepFakeAI/core.py
ADDED
@@ -0,0 +1,222 @@
+#!/usr/bin/env python3
+
+import os
+# single thread doubles cuda performance
+os.environ['OMP_NUM_THREADS'] = '1'
+# reduce tensorflow log level
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+import sys
+import warnings
+from typing import List
+import platform
+import signal
+import shutil
+import argparse
+import onnxruntime
+import tensorflow
+
+import facefusion.choices
+import facefusion.globals
+from facefusion import wording, metadata
+from facefusion.predictor import predict_image, predict_video
+from facefusion.processors.frame.core import get_frame_processors_modules
+from facefusion.utilities import is_image, is_video, detect_fps, create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clear_temp, normalize_output_path, list_module_names, decode_execution_providers, encode_execution_providers
+
+warnings.filterwarnings('ignore', category = FutureWarning, module = 'insightface')
+warnings.filterwarnings('ignore', category = UserWarning, module = 'torchvision')
+
+
+def parse_args() -> None:
+    signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
+    program = argparse.ArgumentParser(formatter_class = lambda prog: argparse.HelpFormatter(prog, max_help_position = 120))
+    program.add_argument('-s', '--source', help = wording.get('source_help'), dest = 'source_path')
+    program.add_argument('-t', '--target', help = wording.get('target_help'), dest = 'target_path')
+    program.add_argument('-o', '--output', help = wording.get('output_help'), dest = 'output_path')
+    program.add_argument('--frame-processors', help = wording.get('frame_processors_help').format(choices = ', '.join(list_module_names('facefusion/processors/frame/modules'))), dest = 'frame_processors', default = ['face_swapper'], nargs='+')
+    program.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(list_module_names('facefusion/uis/layouts'))), dest = 'ui_layouts', default = ['default'], nargs='+')
+    program.add_argument('--keep-fps', help = wording.get('keep_fps_help'), dest = 'keep_fps', action='store_true')
+    program.add_argument('--keep-temp', help = wording.get('keep_temp_help'), dest = 'keep_temp', action='store_true')
+    program.add_argument('--skip-audio', help = wording.get('skip_audio_help'), dest = 'skip_audio', action='store_true')
+    program.add_argument('--face-recognition', help = wording.get('face_recognition_help'), dest = 'face_recognition', default = 'reference', choices = facefusion.choices.face_recognition)
+    program.add_argument('--face-analyser-direction', help = wording.get('face_analyser_direction_help'), dest = 'face_analyser_direction', default = 'left-right', choices = facefusion.choices.face_analyser_direction)
+    program.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), dest = 'face_analyser_age', choices = facefusion.choices.face_analyser_age)
+    program.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), dest = 'face_analyser_gender', choices = facefusion.choices.face_analyser_gender)
+    program.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), dest = 'reference_face_position', type = int, default = 0)
+    program.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), dest = 'reference_face_distance', type = float, default = 1.5)
+    program.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), dest = 'reference_frame_number', type = int, default = 0)
+    program.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), dest = 'trim_frame_start', type = int)
+    program.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), dest = 'trim_frame_end', type = int)
+    program.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), dest = 'temp_frame_format', default = 'jpg', choices = facefusion.choices.temp_frame_format)
+    program.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), dest = 'temp_frame_quality', type = int, default = 100, choices = range(101), metavar = '[0-100]')
+    program.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), dest = 'output_video_encoder', default = 'libx264', choices = facefusion.choices.output_video_encoder)
+    program.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), dest = 'output_video_quality', type = int, default = 90, choices = range(101), metavar = '[0-100]')
+    program.add_argument('--max-memory', help = wording.get('max_memory_help'), dest = 'max_memory', type = int)
+    program.add_argument('--execution-providers', help = wording.get('execution_providers_help').format(choices = 'cpu'), dest = 'execution_providers', default = ['cpu'], choices = suggest_execution_providers_choices(), nargs='+')
+    program.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), dest = 'execution_thread_count', type = int, default = suggest_execution_thread_count_default())
+    program.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), dest = 'execution_queue_count', type = int, default = 1)
+    program.add_argument('-v', '--version', action='version', version = metadata.get('name') + ' ' + metadata.get('version'))
+
+    args = program.parse_args()
+
+    facefusion.globals.source_path = args.source_path
+    facefusion.globals.target_path = args.target_path
+    facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_path, facefusion.globals.target_path, args.output_path)
+    facefusion.globals.headless = facefusion.globals.source_path is not None and facefusion.globals.target_path is not None and facefusion.globals.output_path is not None
+    facefusion.globals.frame_processors = args.frame_processors
+    facefusion.globals.ui_layouts = args.ui_layouts
+    facefusion.globals.keep_fps = args.keep_fps
+    facefusion.globals.keep_temp = args.keep_temp
+    facefusion.globals.skip_audio = args.skip_audio
+    facefusion.globals.face_recognition = args.face_recognition
+    facefusion.globals.face_analyser_direction = args.face_analyser_direction
+    facefusion.globals.face_analyser_age = args.face_analyser_age
+    facefusion.globals.face_analyser_gender = args.face_analyser_gender
+    facefusion.globals.reference_face_position = args.reference_face_position
+    facefusion.globals.reference_frame_number = args.reference_frame_number
+    facefusion.globals.reference_face_distance = args.reference_face_distance
+    facefusion.globals.trim_frame_start = args.trim_frame_start
+    facefusion.globals.trim_frame_end = args.trim_frame_end
+    facefusion.globals.temp_frame_format = args.temp_frame_format
+    facefusion.globals.temp_frame_quality = args.temp_frame_quality
+    facefusion.globals.output_video_encoder = args.output_video_encoder
+    facefusion.globals.output_video_quality = args.output_video_quality
+    facefusion.globals.max_memory = args.max_memory
+    facefusion.globals.execution_providers = decode_execution_providers(args.execution_providers)
+    facefusion.globals.execution_thread_count = args.execution_thread_count
+    facefusion.globals.execution_queue_count = args.execution_queue_count
+
+
+def suggest_execution_providers_choices() -> List[str]:
+    return encode_execution_providers(onnxruntime.get_available_providers())
+
+
+def suggest_execution_thread_count_default() -> int:
+    if 'CUDAExecutionProvider' in onnxruntime.get_available_providers():
+        return 8
+    return 1
+
+
+def limit_resources() -> None:
+    # prevent tensorflow memory leak
+    gpus = tensorflow.config.experimental.list_physical_devices('GPU')
+    for gpu in gpus:
+        tensorflow.config.experimental.set_virtual_device_configuration(gpu, [
+            tensorflow.config.experimental.VirtualDeviceConfiguration(memory_limit = 1024)
+        ])
+    # limit memory usage
+    if facefusion.globals.max_memory:
+        memory = facefusion.globals.max_memory * 1024 ** 3
+        if platform.system().lower() == 'darwin':
+            memory = facefusion.globals.max_memory * 1024 ** 6
+        if platform.system().lower() == 'windows':
+            import ctypes
+            kernel32 = ctypes.windll.kernel32 # type: ignore[attr-defined]
+            kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
+        else:
+            import resource
+            resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
+
+
+def update_status(message : str, scope : str = 'FACEFUSION.CORE') -> None:
+    print('[' + scope + '] ' + message)
+
+
+def pre_check() -> bool:
+    if sys.version_info < (3, 10):
+        update_status(wording.get('python_not_supported').format(version = '3.10'))
+        return False
+    if not shutil.which('ffmpeg'):
+        update_status(wording.get('ffmpeg_not_installed'))
+        return False
+    return True
+
+
+def process_image() -> None:
+    if predict_image(facefusion.globals.target_path):
+        return
+    shutil.copy2(facefusion.globals.target_path, facefusion.globals.output_path)
+    # process frame
+    for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+        update_status(wording.get('processing'), frame_processor_module.NAME)
+        frame_processor_module.process_image(facefusion.globals.source_path, facefusion.globals.output_path, facefusion.globals.output_path)
+        frame_processor_module.post_process()
+    # validate image
+    if is_image(facefusion.globals.target_path):
+        update_status(wording.get('processing_image_succeed'))
+    else:
+        update_status(wording.get('processing_image_failed'))
+
+
+def process_video() -> None:
+    if predict_video(facefusion.globals.target_path):
+        return
+    fps = detect_fps(facefusion.globals.target_path) if facefusion.globals.keep_fps else 25.0
+    update_status(wording.get('creating_temp'))
+    create_temp(facefusion.globals.target_path)
+    # extract frames
+    update_status(wording.get('extracting_frames_fps').format(fps = fps))
+    extract_frames(facefusion.globals.target_path, fps)
+    # process frame
+    temp_frame_paths = get_temp_frame_paths(facefusion.globals.target_path)
+    if temp_frame_paths:
+        for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+            update_status(wording.get('processing'), frame_processor_module.NAME)
+            frame_processor_module.process_video(facefusion.globals.source_path, temp_frame_paths)
+            frame_processor_module.post_process()
+    else:
+        update_status(wording.get('temp_frames_not_found'))
+        return
+    # create video
+    update_status(wording.get('creating_video_fps').format(fps = fps))
+    if not create_video(facefusion.globals.target_path, fps):
+        update_status(wording.get('creating_video_failed'))
+        return
+    # handle audio
+    if facefusion.globals.skip_audio:
+        update_status(wording.get('skipping_audio'))
+        move_temp(facefusion.globals.target_path, facefusion.globals.output_path)
+    else:
+        update_status(wording.get('restoring_audio'))
+        restore_audio(facefusion.globals.target_path, facefusion.globals.output_path)
+    # clear temp
+    update_status(wording.get('clearing_temp'))
+    clear_temp(facefusion.globals.target_path)
+    # validate video
+    if is_video(facefusion.globals.target_path):
+        update_status(wording.get('processing_video_succeed'))
+    else:
+        update_status(wording.get('processing_video_failed'))
+
+
+def conditional_process() -> None:
+    for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+        if not frame_processor_module.pre_process():
+            return
+    if is_image(facefusion.globals.target_path):
+        process_image()
+    if is_video(facefusion.globals.target_path):
+        process_video()
+
+
+def run() -> None:
+    parse_args()
+    limit_resources()
+    # pre check
+    if not pre_check():
+        return
+    for frame_processor in get_frame_processors_modules(facefusion.globals.frame_processors):
+        if not frame_processor.pre_check():
+            return
+    # process or launch
+    if facefusion.globals.headless:
+        conditional_process()
+    else:
+        import facefusion.uis.core as ui
+
+        ui.launch()
+
+
+def destroy() -> None:
+    if facefusion.globals.target_path:
+        clear_temp(facefusion.globals.target_path)
+    sys.exit()
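In headless use the module above is driven through run(), which parses the CLI and then calls conditional_process(). A rough sketch of an equivalent invocation (not part of this commit; run.py itself is outside the 50 files shown, the package name and file paths are assumptions):

import sys

from facefusion import core

# emulate the CLI described by parse_args(); paths are placeholders
sys.argv = [ 'run.py', '--source', 'source.jpg', '--target', 'target.mp4', '--output', 'output.mp4' ]
core.run()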
DeepFakeAI/face_analyser.py
ADDED
@@ -0,0 +1,106 @@
+import threading
+from typing import Any, Optional, List
+import insightface
+import numpy
+
+import facefusion.globals
+from facefusion.typing import Frame, Face, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender
+
+FACE_ANALYSER = None
+THREAD_LOCK = threading.Lock()
+
+
+def get_face_analyser() -> Any:
+    global FACE_ANALYSER
+
+    with THREAD_LOCK:
+        if FACE_ANALYSER is None:
+            FACE_ANALYSER = insightface.app.FaceAnalysis(name = 'buffalo_l', providers = facefusion.globals.execution_providers)
+            FACE_ANALYSER.prepare(ctx_id = 0)
+    return FACE_ANALYSER
+
+
+def clear_face_analyser() -> Any:
+    global FACE_ANALYSER
+
+    FACE_ANALYSER = None
+
+
+def get_one_face(frame : Frame, position : int = 0) -> Optional[Face]:
+    many_faces = get_many_faces(frame)
+    if many_faces:
+        try:
+            return many_faces[position]
+        except IndexError:
+            return many_faces[-1]
+    return None
+
+
+def get_many_faces(frame : Frame) -> List[Face]:
+    try:
+        faces = get_face_analyser().get(frame)
+        if facefusion.globals.face_analyser_direction:
+            faces = sort_by_direction(faces, facefusion.globals.face_analyser_direction)
+        if facefusion.globals.face_analyser_age:
+            faces = filter_by_age(faces, facefusion.globals.face_analyser_age)
+        if facefusion.globals.face_analyser_gender:
+            faces = filter_by_gender(faces, facefusion.globals.face_analyser_gender)
+        return faces
+    except (AttributeError, ValueError):
+        return []
+
+
+def find_similar_faces(frame : Frame, reference_face : Face, face_distance : float) -> List[Face]:
+    many_faces = get_many_faces(frame)
+    similar_faces = []
+    if many_faces:
+        for face in many_faces:
+            if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'):
+                current_face_distance = numpy.sum(numpy.square(face.normed_embedding - reference_face.normed_embedding))
+                if current_face_distance < face_distance:
+                    similar_faces.append(face)
+    return similar_faces
+
+
+def sort_by_direction(faces : List[Face], direction : FaceAnalyserDirection) -> List[Face]:
+    if direction == 'left-right':
+        return sorted(faces, key = lambda face: face['bbox'][0])
+    if direction == 'right-left':
+        return sorted(faces, key = lambda face: face['bbox'][0], reverse = True)
+    if direction == 'top-bottom':
+        return sorted(faces, key = lambda face: face['bbox'][1])
+    if direction == 'bottom-top':
+        return sorted(faces, key = lambda face: face['bbox'][1], reverse = True)
+    if direction == 'small-large':
+        return sorted(faces, key = lambda face: (face['bbox'][2] - face['bbox'][0]) * (face['bbox'][3] - face['bbox'][1]))
+    if direction == 'large-small':
+        return sorted(faces, key = lambda face: (face['bbox'][2] - face['bbox'][0]) * (face['bbox'][3] - face['bbox'][1]), reverse = True)
+    return faces
+
+
+def filter_by_age(faces : List[Face], age : FaceAnalyserAge) -> List[Face]:
+    filter_faces = []
+    for face in faces:
+        if face['age'] < 13 and age == 'child':
+            filter_faces.append(face)
+        elif face['age'] < 19 and age == 'teen':
+            filter_faces.append(face)
+        elif face['age'] < 60 and age == 'adult':
+            filter_faces.append(face)
+        elif face['age'] > 59 and age == 'senior':
+            filter_faces.append(face)
+    return filter_faces
+
+
+def filter_by_gender(faces : List[Face], gender : FaceAnalyserGender) -> List[Face]:
+    filter_faces = []
+    for face in faces:
+        if face['gender'] == 1 and gender == 'male':
+            filter_faces.append(face)
+        if face['gender'] == 0 and gender == 'female':
+            filter_faces.append(face)
+    return filter_faces
+
+
+def get_faces_total(frame : Frame) -> int:
+    return len(get_many_faces(frame))
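A short sketch of how the analyser above is typically driven (not part of this commit; it assumes the package imports as facefusion, as in the module's own imports, and uses a placeholder image path):

import cv2

import facefusion.globals
from facefusion.face_analyser import get_many_faces, get_one_face

# mirror the globals that parse_args() would set: CPU inference,
# faces sorted left to right, female faces only
facefusion.globals.execution_providers = [ 'CPUExecutionProvider' ]
facefusion.globals.face_analyser_direction = 'left-right'
facefusion.globals.face_analyser_gender = 'female'

frame = cv2.imread('target.jpg')
faces = get_many_faces(frame)
reference_face = get_one_face(frame, position = 0)
print(len(faces), reference_face is not None)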
DeepFakeAI/face_reference.py
ADDED
@@ -0,0 +1,21 @@
+from typing import Optional
+
+from facefusion.typing import Face
+
+FACE_REFERENCE = None
+
+
+def get_face_reference() -> Optional[Face]:
+    return FACE_REFERENCE
+
+
+def set_face_reference(face : Face) -> None:
+    global FACE_REFERENCE
+
+    FACE_REFERENCE = face
+
+
+def clear_face_reference() -> None:
+    global FACE_REFERENCE
+
+    FACE_REFERENCE = None
DeepFakeAI/globals.py
ADDED
@@ -0,0 +1,30 @@
+from typing import List, Optional
+
+from facefusion.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat
+
+source_path : Optional[str] = None
+target_path : Optional[str] = None
+output_path : Optional[str] = None
+headless : Optional[bool] = None
+frame_processors : List[str] = []
+ui_layouts : List[str] = []
+keep_fps : Optional[bool] = None
+keep_temp : Optional[bool] = None
+skip_audio : Optional[bool] = None
+face_recognition : Optional[FaceRecognition] = None
+face_analyser_direction : Optional[FaceAnalyserDirection] = None
+face_analyser_age : Optional[FaceAnalyserAge] = None
+face_analyser_gender : Optional[FaceAnalyserGender] = None
+reference_face_position : Optional[int] = None
+reference_frame_number : Optional[int] = None
+reference_face_distance : Optional[float] = None
+trim_frame_start : Optional[int] = None
+trim_frame_end : Optional[int] = None
+temp_frame_format : Optional[TempFrameFormat] = None
+temp_frame_quality : Optional[int] = None
+output_video_encoder : Optional[str] = None
+output_video_quality : Optional[int] = None
+max_memory : Optional[int] = None
+execution_providers : List[str] = []
+execution_thread_count : Optional[int] = None
+execution_queue_count : Optional[int] = None
DeepFakeAI/metadata.py
ADDED
@@ -0,0 +1,13 @@
+METADATA =\
+{
+    'name': 'FaceFusion',
+    'description': 'Next generation face swapper and enhancer',
+    'version': '1.0.0',
+    'license': 'MIT',
+    'author': 'Henry Ruhs',
+    'url': 'https://facefusion.io'
+}
+
+
+def get(key : str) -> str:
+    return METADATA[key]
DeepFakeAI/predictor.py
ADDED
@@ -0,0 +1,43 @@
+import threading
+import numpy
+import opennsfw2
+from PIL import Image
+from keras import Model
+
+from facefusion.typing import Frame
+
+PREDICTOR = None
+THREAD_LOCK = threading.Lock()
+MAX_PROBABILITY = 0.75
+
+
+def get_predictor() -> Model:
+    global PREDICTOR
+
+    with THREAD_LOCK:
+        if PREDICTOR is None:
+            PREDICTOR = opennsfw2.make_open_nsfw_model()
+    return PREDICTOR
+
+
+def clear_predictor() -> None:
+    global PREDICTOR
+
+    PREDICTOR = None
+
+
+def predict_frame(target_frame : Frame) -> bool:
+    image = Image.fromarray(target_frame)
+    image = opennsfw2.preprocess_image(image, opennsfw2.Preprocessing.YAHOO)
+    views = numpy.expand_dims(image, axis = 0)
+    _, probability = get_predictor().predict(views)[0]
+    return probability > MAX_PROBABILITY
+
+
+def predict_image(target_path : str) -> bool:
+    return opennsfw2.predict_image(target_path) > MAX_PROBABILITY
+
+
+def predict_video(target_path : str) -> bool:
+    _, probabilities = opennsfw2.predict_video_frames(video_path = target_path, frame_interval = 100)
+    return any(probability > MAX_PROBABILITY for probability in probabilities)
DeepFakeAI/processors/__init__.py
ADDED
File without changes
DeepFakeAI/processors/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (146 Bytes)
DeepFakeAI/processors/frame/__init__.py
ADDED
File without changes
DeepFakeAI/processors/frame/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (152 Bytes)
DeepFakeAI/processors/frame/__pycache__/core.cpython-310.pyc
ADDED
Binary file (4.06 kB)
DeepFakeAI/processors/frame/core.py
ADDED
@@ -0,0 +1,113 @@
+import os
+import sys
+import importlib
+import psutil
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from queue import Queue
+from types import ModuleType
+from typing import Any, List, Callable
+from tqdm import tqdm
+
+import facefusion.globals
+from facefusion import wording
+
+FRAME_PROCESSORS_MODULES : List[ModuleType] = []
+FRAME_PROCESSORS_METHODS =\
+[
+    'get_frame_processor',
+    'clear_frame_processor',
+    'pre_check',
+    'pre_process',
+    'process_frame',
+    'process_frames',
+    'process_image',
+    'process_video',
+    'post_process'
+]
+
+
+def load_frame_processor_module(frame_processor : str) -> Any:
+    try:
+        frame_processor_module = importlib.import_module('facefusion.processors.frame.modules.' + frame_processor)
+        for method_name in FRAME_PROCESSORS_METHODS:
+            if not hasattr(frame_processor_module, method_name):
+                raise NotImplementedError
+    except ModuleNotFoundError:
+        sys.exit(wording.get('frame_processor_not_loaded').format(frame_processor = frame_processor))
+    except NotImplementedError:
+        sys.exit(wording.get('frame_processor_not_implemented').format(frame_processor = frame_processor))
+    return frame_processor_module
+
+
+def get_frame_processors_modules(frame_processors : List[str]) -> List[ModuleType]:
+    global FRAME_PROCESSORS_MODULES
+
+    if not FRAME_PROCESSORS_MODULES:
+        for frame_processor in frame_processors:
+            frame_processor_module = load_frame_processor_module(frame_processor)
+            FRAME_PROCESSORS_MODULES.append(frame_processor_module)
+    return FRAME_PROCESSORS_MODULES
+
+
+def clear_frame_processors_modules() -> None:
+    global FRAME_PROCESSORS_MODULES
+
+    for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+        frame_processor_module.clear_frame_processor()
+    FRAME_PROCESSORS_MODULES = []
+
+
+def multi_process_frame(source_path : str, temp_frame_paths : List[str], process_frames: Callable[[str, List[str], Any], None], update: Callable[[], None]) -> None:
+    with ThreadPoolExecutor(max_workers = facefusion.globals.execution_thread_count) as executor:
+        futures = []
+        queue = create_queue(temp_frame_paths)
+        queue_per_future = max(len(temp_frame_paths) // facefusion.globals.execution_thread_count * facefusion.globals.execution_queue_count, 1)
+        while not queue.empty():
+            future = executor.submit(process_frames, source_path, pick_queue(queue, queue_per_future), update)
+            futures.append(future)
+        for future in as_completed(futures):
+            future.result()
+
+
+def create_queue(temp_frame_paths : List[str]) -> Queue[str]:
+    queue: Queue[str] = Queue()
+    for frame_path in temp_frame_paths:
+        queue.put(frame_path)
+    return queue
+
+
+def pick_queue(queue : Queue[str], queue_per_future : int) -> List[str]:
+    queues = []
+    for _ in range(queue_per_future):
+        if not queue.empty():
+            queues.append(queue.get())
+    return queues
+
+
+def process_video(source_path : str, frame_paths : List[str], process_frames : Callable[[str, List[str], Any], None]) -> None:
+    progress_bar_format = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'
+    total = len(frame_paths)
+    with tqdm(total = total, desc = wording.get('processing'), unit = 'frame', dynamic_ncols = True, bar_format = progress_bar_format) as progress:
+        multi_process_frame(source_path, frame_paths, process_frames, lambda: update_progress(progress))
+
+
+def update_progress(progress : Any = None) -> None:
+    process = psutil.Process(os.getpid())
+    memory_usage = process.memory_info().rss / 1024 / 1024 / 1024
+    progress.set_postfix(
+    {
+        'memory_usage': '{:.2f}'.format(memory_usage).zfill(5) + 'GB',
+        'execution_providers': facefusion.globals.execution_providers,
+        'execution_thread_count': facefusion.globals.execution_thread_count,
+        'execution_queue_count': facefusion.globals.execution_queue_count
+    })
+    progress.refresh()
+    progress.update(1)
+
+
+def get_device() -> str:
+    if 'CUDAExecutionProvider' in facefusion.globals.execution_providers:
+        return 'cuda'
+    if 'CoreMLExecutionProvider' in facefusion.globals.execution_providers:
+        return 'mps'
+    return 'cpu'
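For context, a sketch of how the dispatcher above is meant to be exercised (not part of this commit; module names and thread counts are illustrative and assume the facefusion package from this diff is importable):

import facefusion.globals
from facefusion.processors.frame.core import get_frame_processors_modules

# multi_process_frame splits the frame queue into batches of roughly
# len(paths) // execution_thread_count * execution_queue_count frames per future
facefusion.globals.execution_thread_count = 4
facefusion.globals.execution_queue_count = 1
facefusion.globals.frame_processors = [ 'face_swapper', 'face_enhancer' ]

# each loaded module must expose the full FRAME_PROCESSORS_METHODS contract
for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
    if frame_processor_module.pre_check():
        print(frame_processor_module.NAME)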
DeepFakeAI/processors/frame/modules/__init__.py
ADDED
File without changes
DeepFakeAI/processors/frame/modules/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (160 Bytes)
DeepFakeAI/processors/frame/modules/__pycache__/face_enhancer.cpython-310.pyc
ADDED
Binary file (3.6 kB)
DeepFakeAI/processors/frame/modules/__pycache__/face_swapper.cpython-310.pyc
ADDED
Binary file (4.21 kB)
DeepFakeAI/processors/frame/modules/__pycache__/frame_enhancer.cpython-310.pyc
ADDED
Binary file (3.09 kB)
DeepFakeAI/processors/frame/modules/face_enhancer.py
ADDED
@@ -0,0 +1,100 @@
+from typing import Any, List, Callable
+import cv2
+import threading
+from gfpgan.utils import GFPGANer
+
+import facefusion.globals
+import facefusion.processors.frame.core as frame_processors
+from facefusion import wording
+from facefusion.core import update_status
+from facefusion.face_analyser import get_many_faces
+from facefusion.typing import Frame, Face
+from facefusion.utilities import conditional_download, resolve_relative_path, is_image, is_video
+
+FRAME_PROCESSOR = None
+THREAD_SEMAPHORE = threading.Semaphore()
+THREAD_LOCK = threading.Lock()
+NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_ENHANCER'
+
+
+def get_frame_processor() -> Any:
+    global FRAME_PROCESSOR
+
+    with THREAD_LOCK:
+        if FRAME_PROCESSOR is None:
+            model_path = resolve_relative_path('../.assets/models/GFPGANv1.4.pth')
+            FRAME_PROCESSOR = GFPGANer(
+                model_path = model_path,
+                upscale = 1,
+                device = frame_processors.get_device()
+            )
+    return FRAME_PROCESSOR
+
+
+def clear_frame_processor() -> None:
+    global FRAME_PROCESSOR
+
+    FRAME_PROCESSOR = None
+
+
+def pre_check() -> bool:
+    download_directory_path = resolve_relative_path('../.assets/models')
+    conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/GFPGANv1.4.pth'])
+    return True
+
+
+def pre_process() -> bool:
+    if not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
+        update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
+        return False
+    return True
+
+
+def post_process() -> None:
+    clear_frame_processor()
+
+
+def enhance_face(target_face : Face, temp_frame : Frame) -> Frame:
+    start_x, start_y, end_x, end_y = map(int, target_face['bbox'])
+    padding_x = int((end_x - start_x) * 0.5)
+    padding_y = int((end_y - start_y) * 0.5)
+    start_x = max(0, start_x - padding_x)
+    start_y = max(0, start_y - padding_y)
+    end_x = max(0, end_x + padding_x)
+    end_y = max(0, end_y + padding_y)
+    crop_frame = temp_frame[start_y:end_y, start_x:end_x]
+    if crop_frame.size:
+        with THREAD_SEMAPHORE:
+            _, _, crop_frame = get_frame_processor().enhance(
+                crop_frame,
+                paste_back = True
+            )
+        temp_frame[start_y:end_y, start_x:end_x] = crop_frame
+    return temp_frame
+
+
+def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
+    many_faces = get_many_faces(temp_frame)
+    if many_faces:
+        for target_face in many_faces:
+            temp_frame = enhance_face(target_face, temp_frame)
+    return temp_frame
+
+
+def process_frames(source_path : str, temp_frame_paths : List[str], update: Callable[[], None]) -> None:
+    for temp_frame_path in temp_frame_paths:
+        temp_frame = cv2.imread(temp_frame_path)
+        result_frame = process_frame(None, None, temp_frame)
+        cv2.imwrite(temp_frame_path, result_frame)
+        if update:
+            update()
+
+
+def process_image(source_path : str, target_path : str, output_path : str) -> None:
+    target_frame = cv2.imread(target_path)
+    result_frame = process_frame(None, None, target_frame)
+    cv2.imwrite(output_path, result_frame)
+
+
+def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
+    facefusion.processors.frame.core.process_video(None, temp_frame_paths, process_frames)
DeepFakeAI/processors/frame/modules/face_swapper.py
ADDED
@@ -0,0 +1,105 @@
+from typing import Any, List, Callable
+import cv2
+import insightface
+import threading
+
+import facefusion.globals
+import facefusion.processors.frame.core as frame_processors
+from facefusion import wording
+from facefusion.core import update_status
+from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces
+from facefusion.face_reference import get_face_reference, set_face_reference
+from facefusion.typing import Face, Frame
+from facefusion.utilities import conditional_download, resolve_relative_path, is_image, is_video
+
+FRAME_PROCESSOR = None
+THREAD_LOCK = threading.Lock()
+NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_SWAPPER'
+
+
+def get_frame_processor() -> Any:
+    global FRAME_PROCESSOR
+
+    with THREAD_LOCK:
+        if FRAME_PROCESSOR is None:
+            model_path = resolve_relative_path('../.assets/models/inswapper_128.onnx')
+            FRAME_PROCESSOR = insightface.model_zoo.get_model(model_path, providers = facefusion.globals.execution_providers)
+    return FRAME_PROCESSOR
+
+
+def clear_frame_processor() -> None:
+    global FRAME_PROCESSOR
+
+    FRAME_PROCESSOR = None
+
+
+def pre_check() -> bool:
+    download_directory_path = resolve_relative_path('../.assets/models')
+    conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128.onnx'])
+    return True
+
+
+def pre_process() -> bool:
+    if not is_image(facefusion.globals.source_path):
+        update_status(wording.get('select_image_source') + wording.get('exclamation_mark'), NAME)
+        return False
+    elif not get_one_face(cv2.imread(facefusion.globals.source_path)):
+        update_status(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), NAME)
+        return False
+    if not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
+        update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
+        return False
+    return True
+
+
+def post_process() -> None:
+    clear_frame_processor()
+
+
+def swap_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
+    return get_frame_processor().get(temp_frame, target_face, source_face, paste_back = True)
+
+
+def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
+    if 'reference' in facefusion.globals.face_recognition:
+        similar_faces = find_similar_faces(temp_frame, reference_face, facefusion.globals.reference_face_distance)
+        if similar_faces:
+            for similar_face in similar_faces:
+                temp_frame = swap_face(source_face, similar_face, temp_frame)
+    if 'many' in facefusion.globals.face_recognition:
+        many_faces = get_many_faces(temp_frame)
+        if many_faces:
+            for target_face in many_faces:
+                temp_frame = swap_face(source_face, target_face, temp_frame)
+    return temp_frame
+
+
+def process_frames(source_path : str, temp_frame_paths : List[str], update: Callable[[], None]) -> None:
+    source_face = get_one_face(cv2.imread(source_path))
+    reference_face = get_face_reference() if 'reference' in facefusion.globals.face_recognition else None
+    for temp_frame_path in temp_frame_paths:
+        temp_frame = cv2.imread(temp_frame_path)
+        result_frame = process_frame(source_face, reference_face, temp_frame)
+        cv2.imwrite(temp_frame_path, result_frame)
+        if update:
+            update()
+
+
+def process_image(source_path : str, target_path : str, output_path : str) -> None:
+    source_face = get_one_face(cv2.imread(source_path))
+    target_frame = cv2.imread(target_path)
+    reference_face = get_one_face(target_frame, facefusion.globals.reference_face_position) if 'reference' in facefusion.globals.face_recognition else None
+    result_frame = process_frame(source_face, reference_face, target_frame)
+    cv2.imwrite(output_path, result_frame)
+
+
+def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
+    conditional_set_face_reference(temp_frame_paths)
+    frame_processors.process_video(source_path, temp_frame_paths, process_frames)
+
+
+def conditional_set_face_reference(temp_frame_paths : List[str]) -> None:
+    if 'reference' in facefusion.globals.face_recognition and not get_face_reference():
+        reference_frame = cv2.imread(temp_frame_paths[facefusion.globals.reference_frame_number])
+        reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position)
+        set_face_reference(reference_face)
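A single-image swap with the module above would look roughly like this (not part of this commit; the file paths are placeholders and the globals mirror the defaults from core.parse_args()):

import facefusion.globals
from facefusion.processors.frame.modules import face_swapper

# 'reference' mode swaps only faces similar to the reference face found in the target
facefusion.globals.execution_providers = [ 'CPUExecutionProvider' ]
facefusion.globals.face_recognition = 'reference'
facefusion.globals.reference_face_position = 0
facefusion.globals.reference_face_distance = 1.5
facefusion.globals.source_path = 'source.jpg'
facefusion.globals.target_path = 'target.jpg'

# pre_check downloads the inswapper model, pre_process validates source and target
if face_swapper.pre_check() and face_swapper.pre_process():
    face_swapper.process_image('source.jpg', 'target.jpg', 'output.jpg')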
DeepFakeAI/processors/frame/modules/frame_enhancer.py
ADDED
@@ -0,0 +1,88 @@
+from typing import Any, List, Callable
+import cv2
+import threading
+from basicsr.archs.rrdbnet_arch import RRDBNet
+from realesrgan import RealESRGANer
+
+import facefusion.processors.frame.core as frame_processors
+from facefusion.typing import Frame, Face
+from facefusion.utilities import conditional_download, resolve_relative_path
+
+FRAME_PROCESSOR = None
+THREAD_SEMAPHORE = threading.Semaphore()
+THREAD_LOCK = threading.Lock()
+NAME = 'FACEFUSION.FRAME_PROCESSOR.FRAME_ENHANCER'
+
+
+def get_frame_processor() -> Any:
+    global FRAME_PROCESSOR
+
+    with THREAD_LOCK:
+        if FRAME_PROCESSOR is None:
+            model_path = resolve_relative_path('../.assets/models/RealESRGAN_x4plus.pth')
+            FRAME_PROCESSOR = RealESRGANer(
+                model_path = model_path,
+                model = RRDBNet(
+                    num_in_ch = 3,
+                    num_out_ch = 3,
+                    num_feat = 64,
+                    num_block = 23,
+                    num_grow_ch = 32,
+                    scale = 4
+                ),
+                device = frame_processors.get_device(),
+                tile = 512,
+                tile_pad = 32,
+                pre_pad = 0,
+                scale = 4
+            )
+    return FRAME_PROCESSOR
+
+
+def clear_frame_processor() -> None:
+    global FRAME_PROCESSOR
+
+    FRAME_PROCESSOR = None
+
+
+def pre_check() -> bool:
+    download_directory_path = resolve_relative_path('../.assets/models')
+    conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/RealESRGAN_x4plus.pth'])
+    return True
+
+
+def pre_process() -> bool:
+    return True
+
+
+def post_process() -> None:
+    clear_frame_processor()
+
+
+def enhance_frame(temp_frame : Frame) -> Frame:
+    with THREAD_SEMAPHORE:
+        temp_frame, _ = get_frame_processor().enhance(temp_frame, outscale = 1)
+    return temp_frame
+
+
+def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
+    return enhance_frame(temp_frame)
+
+
+def process_frames(source_path : str, temp_frame_paths : List[str], update: Callable[[], None]) -> None:
+    for temp_frame_path in temp_frame_paths:
+        temp_frame = cv2.imread(temp_frame_path)
+        result_frame = process_frame(None, None, temp_frame)
+        cv2.imwrite(temp_frame_path, result_frame)
+        if update:
+            update()
+
+
+def process_image(source_path : str, target_path : str, output_path : str) -> None:
+    target_frame = cv2.imread(target_path)
+    result = process_frame(None, None, target_frame)
+    cv2.imwrite(output_path, result)
+
+
+def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
+    frame_processors.process_video(None, temp_frame_paths, process_frames)
DeepFakeAI/typing.py
ADDED
@@ -0,0 +1,13 @@
+from typing import Any, Literal
+from insightface.app.common import Face
+import numpy
+
+Face = Face
+Frame = numpy.ndarray[Any, Any]
+
+FaceRecognition = Literal[ 'reference', 'many' ]
+FaceAnalyserDirection = Literal[ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small' ]
+FaceAnalyserAge = Literal[ 'child', 'teen', 'adult', 'senior' ]
+FaceAnalyserGender = Literal[ 'male', 'female' ]
+TempFrameFormat = Literal[ 'jpg', 'png' ]
+OutputVideoEncoder = Literal[ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ]
DeepFakeAI/uis/__init__.py
ADDED
File without changes