import argparse
import os

import cv2
import numpy as np

import gradio as gr
import insightface
from insightface.app import FaceAnalysis

from dofaker import FaceSwapper, PoseSwapper


def parse_args():
    parser = argparse.ArgumentParser(
        description='Run the FaceClone face and pose swap web UI.')
    parser.add_argument(
        '--inbrowser',
        help='Automatically launch the interface in a new tab on the default browser.',
        dest='inbrowser',
        default=True)
    parser.add_argument(
        '--server_port',
        help='Start gradio app on this port (if available).',
        dest='server_port',
        type=int,
        default=None)
    return parser.parse_args()


def swap_face(input_path, dst_path, src_path, use_enhancer, use_sr, scale,
              face_sim_thre):
    # Replace the target face found in the input image/video with the source face.
    faker = FaceSwapper(use_enhancer=use_enhancer,
                        use_sr=use_sr,
                        scale=scale,
                        face_sim_thre=face_sim_thre)
    output_path = faker.run(input_path, dst_path, src_path)
    return output_path


def swap_pose(input_path, target_path, use_enhancer, use_sr, scale):
    # Transfer the pose from the target image onto the source image.
    faker = PoseSwapper(use_enhancer=use_enhancer, use_sr=use_sr, scale=scale)
    output_path = faker.run(input_path, target_path)
    return output_path


def add_bbox_padding(bbox, margin=5):
    # Expand an [x1, y1, x2, y2] bounding box by `margin` pixels on every side.
    return [
        bbox[0] - margin, bbox[1] - margin, bbox[2] + margin, bbox[3] + margin
    ]


def select_handler(img, evt: gr.SelectData):
    # Return the face crop containing the clicked point and its index among the
    # detected faces (sorted left to right).
    # Note: this helper and get_faces() expect a module-level insightface
    # FaceAnalysis instance named `app`; neither is wired into the UI below.
    faces = app.get(img)
    faces = sorted(faces, key=lambda x: x.bbox[0])
    cropped_image = []
    face_index = -1
    sel_face_index = 0
    for face in faces:
        box = face.bbox.astype(np.int32)
        face_index += 1
        if point_in_box((box[0], box[1]), (box[2], box[3]),
                        (evt.index[0], evt.index[1])):
            margin = int((box[2] - box[0]) * 0.35)
            box = add_bbox_padding(box, margin)
            box = np.clip(box, 0, None)
            sel_face_index = face_index
            cropped_image = img[box[1]:box[3], box[0]:box[2]]
    return cropped_image, sel_face_index


def point_in_box(bl, tr, p):
    # True if point p lies strictly inside the box with corners bl and tr.
    return bl[0] < p[0] < tr[0] and bl[1] < p[1] < tr[1]


def get_faces(img):
    faces = app.get(img)
    faces = sorted(faces, key=lambda x: x.bbox[0])
    return img, len(faces)


def main():
    args = parse_args()

    # Face analysis and swap models are prepared here, but they are not
    # referenced by the Gradio callbacks below.
    app = FaceAnalysis(name='buffalo_l')
    app.prepare(ctx_id=0, det_size=(640, 640))
    swapper = insightface.model_zoo.get_model('inswapper_128.onnx',
                                              download=True,
                                              download_zip=True)

    with gr.Blocks(theme='ysharma/huggingface@=0.0.4') as web_ui:
        gr.Markdown('# FaceClone')
        gr.Markdown('### Deepfake face and pose swapping.')

        with gr.Tab('FaceSwapper'):
            gr.Markdown('FaceClone: powered by Gradio')

            with gr.Tab('Face'):
                with gr.Row():
                    with gr.Column():
                        gr.Markdown('The source image to be swapped')
                        image_input = gr.Image(type='filepath')
                with gr.Row():
                    with gr.Column():
                        gr.Markdown('The target face contained in the source image')
                        dst_face_image = gr.Image(type='filepath')
                    with gr.Column():
                        gr.Markdown('The source face that replaces the target face')
                        src_face_image = gr.Image(type='filepath')
                    with gr.Column():
                        output_image = gr.Image(type='filepath')
                use_enhancer = gr.Checkbox(
                    label='Face enhance',
                    info='Whether to use the face enhancement model.')
                with gr.Row():
                    use_sr = gr.Checkbox(
                        label='Super resolution',
                        info='Whether to use the image super resolution model.')
                    scale = gr.Number(value=1,
                                      label='Image super resolution scale')
                with gr.Row():
                    face_sim_thre = gr.Number(value=0.6,
                                              label='Face similarity threshold',
                                              minimum=0.0,
                                              maximum=1.0)
                convert_button = gr.Button('Swap')
                convert_button.click(fn=swap_face,
                                     inputs=[
                                         image_input, dst_face_image,
                                         src_face_image, use_enhancer, use_sr,
                                         scale, face_sim_thre
                                     ],
                                     outputs=[output_image])

            with gr.Tab('Video'):
                with gr.Row():
                    with gr.Column():
                        gr.Markdown('Target video')
                        video_input = gr.Video()
                with gr.Row():
                    with gr.Column():
                        gr.Markdown('Target face')
                        dst_face_image = gr.Image(type='filepath')
                    with gr.Column():
                        gr.Markdown('Source face')
                        src_face_image = gr.Image(type='filepath')
                    with gr.Column():
                        output_video = gr.Video()
                use_enhancer = gr.Checkbox(
                    label='Face enhance',
                    info='Whether to use the face enhancement model.')
                with gr.Row():
                    use_sr = gr.Checkbox(
                        label='Super resolution',
                        info='Whether to use the image super resolution model.')
                    scale = gr.Number(value=1,
                                      label='Image super resolution scale')
                with gr.Row():
                    face_sim_thre = gr.Number(value=0.6,
                                              label='Face similarity threshold',
                                              minimum=0.0,
                                              maximum=1.0)
                convert_button = gr.Button('Swap')
                convert_button.click(fn=swap_face,
                                     inputs=[
                                         video_input, dst_face_image,
                                         src_face_image, use_enhancer, use_sr,
                                         scale, face_sim_thre
                                     ],
                                     outputs=[output_video])

        with gr.Tab('PoseSwapper'):
            gr.Markdown('FaceClone: deepfake powered by Gradio')

            with gr.Tab('Image'):
                with gr.Row():
                    with gr.Column():
                        gr.Markdown('The source image to be swapped')
                        image_input = gr.Image(type='filepath')
                        gr.Markdown('The target image with the desired pose')
                        target = gr.Image(type='filepath')
                    with gr.Column():
                        output_image = gr.Image(type='filepath')
                use_enhancer = gr.Checkbox(
                    label='Face enhance',
                    info='Whether to use the face enhancement model.')
                with gr.Row():
                    use_sr = gr.Checkbox(
                        label='Super resolution',
                        info='Whether to use the image super resolution model.')
                    scale = gr.Number(value=1,
                                      label='Super resolution scale')
                convert_button = gr.Button('Swap')
                convert_button.click(
                    fn=swap_pose,
                    inputs=[image_input, target, use_enhancer, use_sr, scale],
                    outputs=[output_image])

    web_ui.launch(inbrowser=args.inbrowser, server_port=args.server_port)


if __name__ == '__main__':
    main()
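
# Usage sketch (the filename below is an assumption; the source does not name
# this script):
#
#   python web_ui.py                      # pick a free port and open the browser
#   python web_ui.py --server_port 7860   # pin the Gradio server to port 7860
#
# Both flags map directly to parse_args() above; --inbrowser defaults to True.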