import os
import cv2
import torch
from model import U2NET
import numpy as np
from huggingface_hub import hf_hub_download
import gradio as gr

# Detect a single face (keep the largest detection)
def detect_single_face(face_cascade, img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
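    # Haar cascade detection with scaleFactor=1.1 and minNeighbors=4; the largest box is selected below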
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    if len(faces) == 0:
        print("Warning: No face detected, running on the whole image!")
        return None
    wh, idx = 0, 0
    for i, (x, y, w, h) in enumerate(faces):
        if w * h > wh:
            idx, wh = i, w * h
    return faces[idx]

# Crop and normalize the face region
def crop_face(img, face):
    if face is None:
        return img
    (x, y, w, h) = face
    height, width = img.shape[:2]
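    # Expand the detected box: 40% of the face width on each side, 60% of the height above, 20% below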
    lpad, rpad, tpad, bpad = int(w * 0.4), int(w * 0.4), int(h * 0.6), int(h * 0.2)
    left, right = max(0, x - lpad), min(width, x + w + rpad)
    top, bottom = max(0, y - tpad), min(height, y + h + bpad)
    im_face = img[top:bottom, left:right]
    if len(im_face.shape) == 2:
        im_face = np.repeat(im_face[:, :, np.newaxis], 3, axis=2)
    im_face = np.pad(im_face, ((tpad, bpad), (lpad, rpad), (0, 0)), mode='constant', constant_values=255)
    im_face = cv2.resize(im_face, (512, 512), interpolation=cv2.INTER_AREA)
    return im_face

# Normalize the prediction to [0, 1]
def normPRED(d):
    return (d - torch.min(d)) / (torch.max(d) - torch.min(d))

# Run inference with U2NET
def inference(net, input_img):
    input_img = input_img / np.max(input_img)
    tmpImg = np.zeros((input_img.shape[0], input_img.shape[1], 3))
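    # ImageNet-style per-channel normalization of the (BGR) input, as in the original U2-Net portrait demo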
    tmpImg[:, :, 0] = (input_img[:, :, 2] - 0.406) / 0.225
    tmpImg[:, :, 1] = (input_img[:, :, 1] - 0.456) / 0.224
    tmpImg[:, :, 2] = (input_img[:, :, 0] - 0.485) / 0.229
    tmpImg = torch.from_numpy(tmpImg.transpose((2, 0, 1))[np.newaxis, :, :, :]).type(torch.FloatTensor)
    if torch.cuda.is_available():
        tmpImg = tmpImg.cuda()
    with torch.no_grad():
        d1, _, _, _, _, _, _ = net(tmpImg)
        # Invert the output to get dark strokes on a light background, then rescale to [0, 1]
        pred = normPRED(1.0 - d1[:, 0, :, :])
    return pred.cpu().numpy().squeeze()

# Main function: process the input image and return the portrait sketch
def process_image(img):
    # Gradio passes images as RGB; convert to BGR so the OpenCV-based pipeline sees the expected channel order
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
    face = detect_single_face(face_cascade, img)
    cropped_face = crop_face(img, face)
    result = inference(u2net, cropped_face)
    return (result * 255).astype(np.uint8)

# Download the model from the Hugging Face Hub
def load_u2net_model():
    model_path = hf_hub_download(repo_id="Arrcttacsrks/U2net", filename="u2net_portrait.pth", token=os.getenv("HF_TOKEN"))
    net = U2NET(3, 1)
    # Load the weights onto the CPU, then move the whole model to the GPU if one is available
    net.load_state_dict(torch.load(model_path, map_location="cpu"))
    if torch.cuda.is_available():
        net.cuda()
    net.eval()
    return net

# Initialize the U2NET model
u2net = load_u2net_model()

# Build the Gradio interface
iface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="numpy", label="Upload your image"),
    outputs=gr.Image(type="numpy", label="Portrait Result"),
    title="Portrait Generation with U2NET",
    description="Upload an image to generate its portrait."
)

iface.launch()