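# Command line entry point for DeepFakeAI: builds the argument parser, copies the
# parsed arguments into DeepFakeAI.globals and dispatches image or video processing,
# either headless or through the UI layouts.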
import os
os.environ['OMP_NUM_THREADS'] = '1'
import signal
import ssl
import sys
import warnings
import platform
import shutil
import onnxruntime
from argparse import ArgumentParser, HelpFormatter
import DeepFakeAI.choices
import DeepFakeAI.globals
from DeepFakeAI.face_analyser import get_one_face, get_average_face
from DeepFakeAI.face_store import get_reference_faces, append_reference_face
from DeepFakeAI.vision import get_video_frame, detect_fps, read_image, read_static_images
from DeepFakeAI import face_analyser, face_masker, content_analyser, metadata, logger, wording
from DeepFakeAI.content_analyser import analyse_image, analyse_video
from DeepFakeAI.processors.frame.core import get_frame_processors_modules, load_frame_processor_module
from DeepFakeAI.common_helper import create_metavar
from DeepFakeAI.execution_helper import encode_execution_providers, decode_execution_providers
from DeepFakeAI.normalizer import normalize_output_path, normalize_padding
from DeepFakeAI.filesystem import is_image, is_video, list_module_names, get_temp_frame_paths, create_temp, move_temp, clear_temp
from DeepFakeAI.ffmpeg import extract_frames, compress_image, merge_video, restore_audio
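
# Runtime setup: reduce onnxruntime log noise, ignore gradio and torchvision
# UserWarnings and, on macOS, use an unverified SSL context for HTTPS requests.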
onnxruntime.set_default_logger_severity(3)
warnings.filterwarnings('ignore', category = UserWarning, module = 'gradio')
warnings.filterwarnings('ignore', category = UserWarning, module = 'torchvision')
if platform.system().lower() == 'darwin':
    ssl._create_default_https_context = ssl._create_unverified_context
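
# Register the SIGINT handler, build the full argument parser (general, misc, execution,
# face analyser, face selector, face mask, frame extraction, output creation, frame
# processor and ui options) and hand it over to run().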
def cli() -> None:
    signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
    program = ArgumentParser(formatter_class = lambda prog: HelpFormatter(prog, max_help_position = 120), add_help = False)
    # general
    program.add_argument('-s', '--source', action = 'append', help = wording.get('source_help'), dest = 'source_paths')
    program.add_argument('-t', '--target', help = wording.get('target_help'), dest = 'target_path')
    program.add_argument('-o', '--output', help = wording.get('output_help'), dest = 'output_path')
    program.add_argument('-v', '--version', version = metadata.get('name') + ' ' + metadata.get('version'), action = 'version')
    # misc
    group_misc = program.add_argument_group('misc')
    group_misc.add_argument('--skip-download', help = wording.get('skip_download_help'), action = 'store_true')
    group_misc.add_argument('--headless', help = wording.get('headless_help'), action = 'store_true')
    group_misc.add_argument('--log-level', help = wording.get('log_level_help'), default = 'info', choices = logger.get_log_levels())
    # execution
    execution_providers = encode_execution_providers(onnxruntime.get_available_providers())
    group_execution = program.add_argument_group('execution')
    group_execution.add_argument('--execution-providers', help = wording.get('execution_providers_help').format(choices = ', '.join(execution_providers)), default = [ 'cpu' ], choices = execution_providers, nargs = '+', metavar = 'EXECUTION_PROVIDERS')
    group_execution.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), type = int, default = 4, choices = DeepFakeAI.choices.execution_thread_count_range, metavar = create_metavar(DeepFakeAI.choices.execution_thread_count_range))
    group_execution.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), type = int, default = 1, choices = DeepFakeAI.choices.execution_queue_count_range, metavar = create_metavar(DeepFakeAI.choices.execution_queue_count_range))
    group_execution.add_argument('--max-memory', help = wording.get('max_memory_help'), type = int, choices = DeepFakeAI.choices.max_memory_range, metavar = create_metavar(DeepFakeAI.choices.max_memory_range))
    # face analyser
    group_face_analyser = program.add_argument_group('face analyser')
    group_face_analyser.add_argument('--face-analyser-order', help = wording.get('face_analyser_order_help'), default = 'left-right', choices = DeepFakeAI.choices.face_analyser_orders)
    group_face_analyser.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), choices = DeepFakeAI.choices.face_analyser_ages)
    group_face_analyser.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), choices = DeepFakeAI.choices.face_analyser_genders)
    group_face_analyser.add_argument('--face-detector-model', help = wording.get('face_detector_model_help'), default = 'retinaface', choices = DeepFakeAI.choices.face_detector_models)
    group_face_analyser.add_argument('--face-detector-size', help = wording.get('face_detector_size_help'), default = '640x640', choices = DeepFakeAI.choices.face_detector_sizes)
    group_face_analyser.add_argument('--face-detector-score', help = wording.get('face_detector_score_help'), type = float, default = 0.5, choices = DeepFakeAI.choices.face_detector_score_range, metavar = create_metavar(DeepFakeAI.choices.face_detector_score_range))
    # face selector
    group_face_selector = program.add_argument_group('face selector')
    group_face_selector.add_argument('--face-selector-mode', help = wording.get('face_selector_mode_help'), default = 'reference', choices = DeepFakeAI.choices.face_selector_modes)
    group_face_selector.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), type = int, default = 0)
    group_face_selector.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), type = float, default = 0.6, choices = DeepFakeAI.choices.reference_face_distance_range, metavar = create_metavar(DeepFakeAI.choices.reference_face_distance_range))
    group_face_selector.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), type = int, default = 0)
    # face mask
    group_face_mask = program.add_argument_group('face mask')
    group_face_mask.add_argument('--face-mask-types', help = wording.get('face_mask_types_help').format(choices = ', '.join(DeepFakeAI.choices.face_mask_types)), default = [ 'box' ], choices = DeepFakeAI.choices.face_mask_types, nargs = '+', metavar = 'FACE_MASK_TYPES')
    group_face_mask.add_argument('--face-mask-blur', help = wording.get('face_mask_blur_help'), type = float, default = 0.3, choices = DeepFakeAI.choices.face_mask_blur_range, metavar = create_metavar(DeepFakeAI.choices.face_mask_blur_range))
    group_face_mask.add_argument('--face-mask-padding', help = wording.get('face_mask_padding_help'), type = int, default = [ 0, 0, 0, 0 ], nargs = '+')
    group_face_mask.add_argument('--face-mask-regions', help = wording.get('face_mask_regions_help').format(choices = ', '.join(DeepFakeAI.choices.face_mask_regions)), default = DeepFakeAI.choices.face_mask_regions, choices = DeepFakeAI.choices.face_mask_regions, nargs = '+', metavar = 'FACE_MASK_REGIONS')
    # frame extraction
    group_frame_extraction = program.add_argument_group('frame extraction')
    group_frame_extraction.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), type = int)
    group_frame_extraction.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), type = int)
    group_frame_extraction.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), default = 'jpg', choices = DeepFakeAI.choices.temp_frame_formats)
    group_frame_extraction.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), type = int, default = 100, choices = DeepFakeAI.choices.temp_frame_quality_range, metavar = create_metavar(DeepFakeAI.choices.temp_frame_quality_range))
    group_frame_extraction.add_argument('--keep-temp', help = wording.get('keep_temp_help'), action = 'store_true')
    # output creation
    group_output_creation = program.add_argument_group('output creation')
    group_output_creation.add_argument('--output-image-quality', help = wording.get('output_image_quality_help'), type = int, default = 80, choices = DeepFakeAI.choices.output_image_quality_range, metavar = create_metavar(DeepFakeAI.choices.output_image_quality_range))
    group_output_creation.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), default = 'libx264', choices = DeepFakeAI.choices.output_video_encoders)
    group_output_creation.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), type = int, default = 80, choices = DeepFakeAI.choices.output_video_quality_range, metavar = create_metavar(DeepFakeAI.choices.output_video_quality_range))
    group_output_creation.add_argument('--keep-fps', help = wording.get('keep_fps_help'), action = 'store_true')
    group_output_creation.add_argument('--skip-audio', help = wording.get('skip_audio_help'), action = 'store_true')
    # frame processors
    available_frame_processors = list_module_names('DeepFakeAI/processors/frame/modules')
    program = ArgumentParser(parents = [ program ], formatter_class = program.formatter_class, add_help = True)
    group_frame_processors = program.add_argument_group('frame processors')
    group_frame_processors.add_argument('--frame-processors', help = wording.get('frame_processors_help').format(choices = ', '.join(available_frame_processors)), default = [ 'face_swapper' ], nargs = '+')
    for frame_processor in available_frame_processors:
        frame_processor_module = load_frame_processor_module(frame_processor)
        frame_processor_module.register_args(group_frame_processors)
    # uis
    group_uis = program.add_argument_group('uis')
    group_uis.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(list_module_names('DeepFakeAI/uis/layouts'))), default = [ 'default' ], nargs = '+')
    run(program)
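
# Parse the command line arguments and copy every value into DeepFakeAI.globals,
# then let each frame processor module apply its own arguments.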
def apply_args(program : ArgumentParser) -> None:
    args = program.parse_args()
    # general
    DeepFakeAI.globals.source_paths = args.source_paths
    DeepFakeAI.globals.target_path = args.target_path
    DeepFakeAI.globals.output_path = normalize_output_path(DeepFakeAI.globals.source_paths, DeepFakeAI.globals.target_path, args.output_path)
    # misc
    DeepFakeAI.globals.skip_download = args.skip_download
    DeepFakeAI.globals.headless = args.headless
    DeepFakeAI.globals.log_level = args.log_level
    # execution
    DeepFakeAI.globals.execution_providers = decode_execution_providers(args.execution_providers)
    DeepFakeAI.globals.execution_thread_count = args.execution_thread_count
    DeepFakeAI.globals.execution_queue_count = args.execution_queue_count
    DeepFakeAI.globals.max_memory = args.max_memory
    # face analyser
    DeepFakeAI.globals.face_analyser_order = args.face_analyser_order
    DeepFakeAI.globals.face_analyser_age = args.face_analyser_age
    DeepFakeAI.globals.face_analyser_gender = args.face_analyser_gender
    DeepFakeAI.globals.face_detector_model = args.face_detector_model
    DeepFakeAI.globals.face_detector_size = args.face_detector_size
    DeepFakeAI.globals.face_detector_score = args.face_detector_score
    # face selector
    DeepFakeAI.globals.face_selector_mode = args.face_selector_mode
    DeepFakeAI.globals.reference_face_position = args.reference_face_position
    DeepFakeAI.globals.reference_face_distance = args.reference_face_distance
    DeepFakeAI.globals.reference_frame_number = args.reference_frame_number
    # face mask
    DeepFakeAI.globals.face_mask_types = args.face_mask_types
    DeepFakeAI.globals.face_mask_blur = args.face_mask_blur
    DeepFakeAI.globals.face_mask_padding = normalize_padding(args.face_mask_padding)
    DeepFakeAI.globals.face_mask_regions = args.face_mask_regions
    # frame extraction
    DeepFakeAI.globals.trim_frame_start = args.trim_frame_start
    DeepFakeAI.globals.trim_frame_end = args.trim_frame_end
    DeepFakeAI.globals.temp_frame_format = args.temp_frame_format
    DeepFakeAI.globals.temp_frame_quality = args.temp_frame_quality
    DeepFakeAI.globals.keep_temp = args.keep_temp
    # output creation
    DeepFakeAI.globals.output_image_quality = args.output_image_quality
    DeepFakeAI.globals.output_video_encoder = args.output_video_encoder
    DeepFakeAI.globals.output_video_quality = args.output_video_quality
    DeepFakeAI.globals.keep_fps = args.keep_fps
    DeepFakeAI.globals.skip_audio = args.skip_audio
    # frame processors
    available_frame_processors = list_module_names('DeepFakeAI/processors/frame/modules')
    DeepFakeAI.globals.frame_processors = args.frame_processors
    for frame_processor in available_frame_processors:
        frame_processor_module = load_frame_processor_module(frame_processor)
        frame_processor_module.apply_args(program)
    # uis
    DeepFakeAI.globals.ui_layouts = args.ui_layouts
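
# Main entry: apply the arguments, initialise logging, limit memory usage, run the
# pre checks and either process headlessly or launch the configured UI layouts.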
def run(program : ArgumentParser) -> None:
    apply_args(program)
    logger.init(DeepFakeAI.globals.log_level)
    limit_resources()
    if not pre_check() or not content_analyser.pre_check() or not face_analyser.pre_check() or not face_masker.pre_check():
        return
    for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
        if not frame_processor_module.pre_check():
            return
    if DeepFakeAI.globals.headless:
        conditional_process()
    else:
        import DeepFakeAI.uis.core as ui
        for ui_layout in ui.get_ui_layouts_modules(DeepFakeAI.globals.ui_layouts):
            if not ui_layout.pre_check():
                return
        ui.launch()
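
# SIGINT handler: clear the temporary files of the current target and exit.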
def destroy() -> None:
    if DeepFakeAI.globals.target_path:
        clear_temp(DeepFakeAI.globals.target_path)
    sys.exit()
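
# Apply the --max-memory limit, using SetProcessWorkingSetSize on Windows and
# resource.setrlimit(RLIMIT_DATA) on other platforms.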
def limit_resources() -> None:
    if DeepFakeAI.globals.max_memory:
        memory = DeepFakeAI.globals.max_memory * 1024 ** 3
        if platform.system().lower() == 'darwin':
            memory = DeepFakeAI.globals.max_memory * 1024 ** 6
        if platform.system().lower() == 'windows':
            import ctypes
            kernel32 = ctypes.windll.kernel32 # type: ignore[attr-defined]
            kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
        else:
            import resource
            resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
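
# Verify the local environment: Python 3.9 or newer and ffmpeg available on the PATH.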
def pre_check() -> bool:
    if sys.version_info < (3, 9):
        logger.error(wording.get('python_not_supported').format(version = '3.9'), __name__.upper())
        return False
    if not shutil.which('ffmpeg'):
        logger.error(wording.get('ffmpeg_not_installed'), __name__.upper())
        return False
    return True
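
# Append the reference faces if needed, run the output pre process of every frame
# processor and dispatch to image or video processing.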
def conditional_process() -> None:
    conditional_append_reference_faces()
    for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
        if not frame_processor_module.pre_process('output'):
            return
    if is_image(DeepFakeAI.globals.target_path):
        process_image()
    if is_video(DeepFakeAI.globals.target_path):
        process_video()
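
# In reference mode, detect the reference face in the chosen target frame and store
# it, plus one processed variant per frame processor, in the face store.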
def conditional_append_reference_faces() -> None:
    if 'reference' in DeepFakeAI.globals.face_selector_mode and not get_reference_faces():
        source_frames = read_static_images(DeepFakeAI.globals.source_paths)
        source_face = get_average_face(source_frames)
        if is_video(DeepFakeAI.globals.target_path):
            reference_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number)
        else:
            reference_frame = read_image(DeepFakeAI.globals.target_path)
        reference_face = get_one_face(reference_frame, DeepFakeAI.globals.reference_face_position)
        append_reference_face('origin', reference_face)
        if source_face and reference_face:
            for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
                reference_frame = frame_processor_module.get_reference_frame(source_face, reference_face, reference_frame)
                reference_face = get_one_face(reference_frame, DeepFakeAI.globals.reference_face_position)
                append_reference_face(frame_processor_module.__name__, reference_face)
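
# Process a single target image: abort if the content analyser flags it, copy the
# target to the output path, run every frame processor on it, compress the image
# and validate the result.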
def process_image() -> None:
    if analyse_image(DeepFakeAI.globals.target_path):
        return
    shutil.copy2(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
    # process frame
    for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
        logger.info(wording.get('processing'), frame_processor_module.NAME)
        frame_processor_module.process_image(DeepFakeAI.globals.source_paths, DeepFakeAI.globals.output_path, DeepFakeAI.globals.output_path)
        frame_processor_module.post_process()
    # compress image
    logger.info(wording.get('compressing_image'), __name__.upper())
    if not compress_image(DeepFakeAI.globals.output_path):
        logger.error(wording.get('compressing_image_failed'), __name__.upper())
    # validate image
    if is_image(DeepFakeAI.globals.output_path):
        logger.info(wording.get('processing_image_succeed'), __name__.upper())
    else:
        logger.error(wording.get('processing_image_failed'), __name__.upper())
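
# Process a target video: abort if the content analyser flags it, extract the frames,
# run every frame processor, merge the video, restore or skip the audio, clear the
# temporary files and validate the result.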
def process_video() -> None:
    if analyse_video(DeepFakeAI.globals.target_path, DeepFakeAI.globals.trim_frame_start, DeepFakeAI.globals.trim_frame_end):
        return
    fps = detect_fps(DeepFakeAI.globals.target_path) if DeepFakeAI.globals.keep_fps else 25.0
    # create temp
    logger.info(wording.get('creating_temp'), __name__.upper())
    create_temp(DeepFakeAI.globals.target_path)
    # extract frames
    logger.info(wording.get('extracting_frames_fps').format(fps = fps), __name__.upper())
    extract_frames(DeepFakeAI.globals.target_path, fps)
    # process frame
    temp_frame_paths = get_temp_frame_paths(DeepFakeAI.globals.target_path)
    if temp_frame_paths:
        for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
            logger.info(wording.get('processing'), frame_processor_module.NAME)
            frame_processor_module.process_video(DeepFakeAI.globals.source_paths, temp_frame_paths)
            frame_processor_module.post_process()
    else:
        logger.error(wording.get('temp_frames_not_found'), __name__.upper())
        return
    # merge video
    logger.info(wording.get('merging_video_fps').format(fps = fps), __name__.upper())
    if not merge_video(DeepFakeAI.globals.target_path, fps):
        logger.error(wording.get('merging_video_failed'), __name__.upper())
        return
    # handle audio
    if DeepFakeAI.globals.skip_audio:
        logger.info(wording.get('skipping_audio'), __name__.upper())
        move_temp(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
    else:
        logger.info(wording.get('restoring_audio'), __name__.upper())
        if not restore_audio(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path):
            logger.warn(wording.get('restoring_audio_skipped'), __name__.upper())
            move_temp(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
    # clear temp
    logger.info(wording.get('clearing_temp'), __name__.upper())
    clear_temp(DeepFakeAI.globals.target_path)
    # validate video
    if is_video(DeepFakeAI.globals.output_path):
        logger.info(wording.get('processing_video_succeed'), __name__.upper())
    else:
        logger.error(wording.get('processing_video_failed'), __name__.upper())