# FaceRecognition/utils.py
import os
import numpy as np
import pickle as pkl
import yaml
from collections import defaultdict
import tensorflow.compat.v1 as tf
import facenet
import detect_face  # MTCNN detector module from the facenet code base (import path assumed)
from PIL import Image

infomation = defaultdict(dict)

# Read the directory paths from config.yaml.
with open('config.yaml', 'r') as f:
    cfg = yaml.load(f, Loader=yaml.FullLoader)

MODEL_DIR = cfg['PATH']['MODEL_DIR']
CLASSIFIER_DIR = cfg['PATH']['CLASSIFIER_DIR']
NPY_DIR = cfg['PATH']['NPY_DIR']
TRAIN_IMG_DIR = cfg['PATH']['TRAIN_IMG_DIR']
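
# The config file only needs the four PATH entries read above. A minimal config.yaml
# could look like the sketch below; the values are illustrative placeholders, not the
# project's actual paths:
#
# PATH:
#   MODEL_DIR: models/facenet_model
#   CLASSIFIER_DIR: classifiers/classifier.pkl
#   NPY_DIR: npy
#   TRAIN_IMG_DIR: data/train_img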


def get_elements_dir(x):
    """Pass-through helper that returns the given path unchanged."""
    path = x
    return path


def load_essentail_components():
    """Resolve the model, classifier, MTCNN-weights and training-image directories."""
    model_dir = get_elements_dir(MODEL_DIR)
    classifier_filename = get_elements_dir(CLASSIFIER_DIR)
    npy = get_elements_dir(NPY_DIR)
    train_img = get_elements_dir(TRAIN_IMG_DIR)
    return model_dir, classifier_filename, npy, train_img


def gpu_session():
    """Create a TF1 session limited to 70% of available GPU memory."""
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
    return sess
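
# Design note: per_process_gpu_memory_fraction=0.7 reserves up to 70% of GPU memory
# up front. A common alternative (not used here, shown only as an assumption) is to
# grow the allocation on demand:
#
# gpu_options = tf.GPUOptions(allow_growth=True)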


def configure_mtcnn(sess, npy, train_img):
    """Build the MTCNN detector and return it together with the detection settings."""
    pnet, rnet, onet = detect_face.create_mtcnn(sess, npy)
    minsize = 30  # minimum size of face
    threshold = [0.7, 0.8, 0.8]  # three steps' threshold
    factor = 0.709  # scale factor
    margin = 44
    batch_size = 100  # 1000
    image_size = 182
    input_image_size = 160
    HumanNames = os.listdir(train_img)
    HumanNames.sort()
    # Return everything the caller needs; the original version computed these values
    # but never returned them.
    return (pnet, rnet, onet, minsize, threshold, factor, margin,
            batch_size, image_size, input_image_size, HumanNames)
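
# Illustrative sketch (not part of the original pipeline): using the detector returned
# by configure_mtcnn() on its own to count faces in an image. 'example.jpg' is a
# hypothetical placeholder path.
#
# with tf.Graph().as_default():
#     sess = gpu_session()
#     with sess.as_default():
#         pnet, rnet, onet, minsize, threshold, factor, *_ = configure_mtcnn(
#             sess, NPY_DIR, TRAIN_IMG_DIR)
#         img = np.asarray(Image.open('example.jpg').convert('RGB'))
#         boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
#         print('Detected %d face(s)' % boxes.shape[0])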


def recognize(image):
    """Detect faces in a NumPy image array and prepare the FaceNet model and classifier."""
    model_dir, classifier_filename, npy, train_img = load_essentail_components()
    with tf.Graph().as_default():
        sess = gpu_session()
        with sess.as_default():
            # MTCNN detector plus the detection/crop settings.
            (pnet, rnet, onet, minsize, threshold, factor, margin,
             batch_size, image_size, input_image_size, HumanNames) = configure_mtcnn(sess, npy, train_img)
            print('Loading Model ...')
            facenet.load_model(model=model_dir)
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]
            # Load the trained classifier and its class names.
            classifier_filename_exp = os.path.expanduser(classifier_filename)
            with open(classifier_filename_exp, 'rb') as infile:
                (model, class_names) = pkl.load(infile, encoding='latin1')
            # Make sure the input is RGB, then run face detection.
            if image.ndim == 2:
                image = facenet.to_rgb(image)
            bounding_boxes, _ = detect_face.detect_face(image, minsize, pnet, rnet, onet, threshold, factor)
            faceNum = bounding_boxes.shape[0]