from typing import List, Optional, Tuple, Any, Dict
import gradio
import facefusion.globals
import facefusion.choices
from facefusion import wording
from facefusion.face_cache import clear_faces_cache
from facefusion.vision import get_video_frame, read_static_image, normalize_frame_color
from facefusion.face_analyser import get_many_faces
from facefusion.face_reference import clear_face_reference
from facefusion.typing import Frame, FaceSelectorMode
from facefusion.utilities import is_image, is_video
from facefusion.uis.core import get_ui_component, register_ui_component
from facefusion.uis.typing import ComponentName
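
# Module level handles to the rendered Gradio components, populated by render()
# and reused by listen() when wiring events.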
FACE_SELECTOR_MODE_DROPDOWN : Optional[gradio.Dropdown] = None
REFERENCE_FACE_POSITION_GALLERY : Optional[gradio.Gallery] = None
REFERENCE_FACE_DISTANCE_SLIDER : Optional[gradio.Slider] = None
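
# Render the face selector widgets: a mode dropdown, a gallery of faces detected in the
# current target image or video frame, and a slider for the reference face distance.
# The gallery and slider are only visible while the selector mode includes 'reference'.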
def render() -> None:
	global FACE_SELECTOR_MODE_DROPDOWN
	global REFERENCE_FACE_POSITION_GALLERY
	global REFERENCE_FACE_DISTANCE_SLIDER

	reference_face_gallery_args : Dict[str, Any] =\
	{
		'label': wording.get('reference_face_gallery_label'),
		'object_fit': 'cover',
		'columns': 8,
		'allow_preview': False,
		'visible': 'reference' in facefusion.globals.face_selector_mode
	}
	if is_image(facefusion.globals.target_path):
		reference_frame = read_static_image(facefusion.globals.target_path)
		reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame)
	if is_video(facefusion.globals.target_path):
		reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
		reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame)
	FACE_SELECTOR_MODE_DROPDOWN = gradio.Dropdown(
		label = wording.get('face_selector_mode_dropdown_label'),
		choices = facefusion.choices.face_selector_modes,
		value = facefusion.globals.face_selector_mode
	)
	REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args)
	REFERENCE_FACE_DISTANCE_SLIDER = gradio.Slider(
		label = wording.get('reference_face_distance_slider_label'),
		value = facefusion.globals.reference_face_distance,
		step = facefusion.choices.reference_face_distance_range[1] - facefusion.choices.reference_face_distance_range[0],
		minimum = facefusion.choices.reference_face_distance_range[0],
		maximum = facefusion.choices.reference_face_distance_range[-1],
		visible = 'reference' in facefusion.globals.face_selector_mode
	)
	register_ui_component('face_selector_mode_dropdown', FACE_SELECTOR_MODE_DROPDOWN)
	register_ui_component('reference_face_position_gallery', REFERENCE_FACE_POSITION_GALLERY)
	register_ui_component('reference_face_distance_slider', REFERENCE_FACE_DISTANCE_SLIDER)
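
# Wire component events. Uploading, changing or clearing the target media refreshes the
# reference face position and the gallery; changing a face analyser option refreshes the
# gallery; changing a face detector option additionally clears the cached reference face
# and the faces cache before rebuilding. The preview frame slider keeps the reference
# frame number in sync and rebuilds the gallery on release.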
def listen() -> None:
	FACE_SELECTOR_MODE_DROPDOWN.select(update_face_selector_mode, inputs = FACE_SELECTOR_MODE_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ])
	REFERENCE_FACE_POSITION_GALLERY.select(clear_and_update_reference_face_position)
	REFERENCE_FACE_DISTANCE_SLIDER.change(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER)
	multi_component_names : List[ComponentName] =\
	[
		'target_image',
		'target_video'
	]
	for component_name in multi_component_names:
		component = get_ui_component(component_name)
		if component:
			for method in [ 'upload', 'change', 'clear' ]:
				getattr(component, method)(update_reference_face_position)
				getattr(component, method)(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
	change_one_component_names : List[ComponentName] =\
	[
		'face_analyser_order_dropdown',
		'face_analyser_age_dropdown',
		'face_analyser_gender_dropdown'
	]
	for component_name in change_one_component_names:
		component = get_ui_component(component_name)
		if component:
			component.change(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
	change_two_component_names : List[ComponentName] =\
	[
		'face_detector_model_dropdown',
		'face_detector_size_dropdown',
		'face_detector_score_slider'
	]
	for component_name in change_two_component_names:
		component = get_ui_component(component_name)
		if component:
			component.change(clear_and_update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
	preview_frame_slider = get_ui_component('preview_frame_slider')
	if preview_frame_slider:
		preview_frame_slider.change(update_reference_frame_number, inputs = preview_frame_slider)
		preview_frame_slider.release(update_reference_position_gallery, outputs = REFERENCE_FACE_POSITION_GALLERY)
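
# Toggle visibility of the reference specific widgets when the selector mode changes:
# they are shown for 'reference' mode and hidden for 'one' and 'many'.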
def update_face_selector_mode(face_selector_mode : FaceSelectorMode) -> Tuple[gradio.Gallery, gradio.Slider]:
	if face_selector_mode == 'reference':
		facefusion.globals.face_selector_mode = face_selector_mode
		return gradio.Gallery(visible = True), gradio.Slider(visible = True)
	if face_selector_mode == 'one':
		facefusion.globals.face_selector_mode = face_selector_mode
		return gradio.Gallery(visible = False), gradio.Slider(visible = False)
	if face_selector_mode == 'many':
		facefusion.globals.face_selector_mode = face_selector_mode
		return gradio.Gallery(visible = False), gradio.Slider(visible = False)
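
# Gallery selection handler: drop the cached reference face and faces cache, store the
# selected gallery index as the new reference face position, then rebuild the gallery.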
def clear_and_update_reference_face_position(event : gradio.SelectData) -> gradio.Gallery:
	clear_face_reference()
	clear_faces_cache()
	update_reference_face_position(event.index)
	return update_reference_position_gallery()
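
# Plain setters that mirror UI state into facefusion.globals.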
def update_reference_face_position(reference_face_position : int = 0) -> None:
	facefusion.globals.reference_face_position = reference_face_position

def update_reference_face_distance(reference_face_distance : float) -> None:
	facefusion.globals.reference_face_distance = reference_face_distance

def update_reference_frame_number(reference_frame_number : int) -> None:
	facefusion.globals.reference_frame_number = reference_frame_number
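
# Invalidate the cached reference face and detected faces, then rebuild the gallery.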
def clear_and_update_reference_position_gallery() -> gradio.Gallery:
	clear_face_reference()
	clear_faces_cache()
	return update_reference_position_gallery()
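
# Rebuild the gallery from the current target: the static image itself, or the frame at
# reference_frame_number for a video. Falls back to an empty gallery when there is no
# usable target or no faces were found.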
def update_reference_position_gallery() -> gradio.Gallery:
	gallery_frames = []
	if is_image(facefusion.globals.target_path):
		reference_frame = read_static_image(facefusion.globals.target_path)
		gallery_frames = extract_gallery_frames(reference_frame)
	if is_video(facefusion.globals.target_path):
		reference_frame = get_video_frame(facefusion.globals.target_path, facefusion.globals.reference_frame_number)
		gallery_frames = extract_gallery_frames(reference_frame)
	if gallery_frames:
		return gradio.Gallery(value = gallery_frames)
	return gradio.Gallery(value = None)
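
# Crop each detected face out of the frame for display in the gallery. The bounding box
# is padded by 25% of its width and height on every side, so a 100x80 px box grows by
# 25 px left and right and 20 px top and bottom, clamped at the frame origin.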
def extract_gallery_frames(reference_frame : Frame) -> List[Frame]:
	crop_frames = []
	faces = get_many_faces(reference_frame)
	for face in faces:
		start_x, start_y, end_x, end_y = map(int, face.bbox)
		padding_x = int((end_x - start_x) * 0.25)
		padding_y = int((end_y - start_y) * 0.25)
		start_x = max(0, start_x - padding_x)
		start_y = max(0, start_y - padding_y)
		end_x = max(0, end_x + padding_x)
		end_y = max(0, end_y + padding_y)
		crop_frame = reference_frame[start_y:end_y, start_x:end_x]
		crop_frame = normalize_frame_color(crop_frame)
		crop_frames.append(crop_frame)
	return crop_frames
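
# Usage sketch, assuming the layout wiring used elsewhere in facefusion (not shown in
# this file): a layout module renders the component inside a gradio.Blocks context and
# attaches the listeners afterwards, for example:
#
#   with gradio.Blocks():
#       face_selector.render()
#       face_selector.listen()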