datnguyentien204 committed on
Commit
03b1ce7
1 Parent(s): 6d42b85

6797ab7cc662a6915657900b76bbadf3d8ccd13b941db166ca7352788794f756

Browse files
Files changed (39)
  1. HD.txt +0 -0
  2. aligned_img/Phong/WIN_20241001_07_47_20_Pro_2.png +3 -0
  3. aligned_img/Phong/WIN_20241001_07_47_21_Pro.png +3 -0
  4. aligned_img/Phong/WIN_20241001_07_47_21_Pro_2.png +3 -0
  5. aligned_img/Phong/WIN_20241001_07_47_21_Pro_3.png +3 -0
  6. aligned_img/Phong/WIN_20241001_07_47_22_Pro.png +3 -0
  7. aligned_img/Phong/WIN_20241001_07_47_22_Pro_2.png +3 -0
  8. aligned_img/Phong/WIN_20241001_07_47_22_Pro_3.png +3 -0
  9. aligned_img/Phong/WIN_20241001_07_47_23_Pro.png +3 -0
  10. aligned_img/Phong/WIN_20241001_07_47_23_Pro_2.png +3 -0
  11. aligned_img/Phong/WIN_20241001_07_47_23_Pro_3.png +3 -0
  12. aligned_img/Phong/WIN_20241001_07_47_24_Pro.png +3 -0
  13. aligned_img/Phong/WIN_20241001_07_47_24_Pro_2.png +3 -0
  14. aligned_img/Phong/WIN_20241001_07_47_25_Pro.png +3 -0
  15. aligned_img/Phong/WIN_20241001_07_47_25_Pro_2.png +3 -0
  16. aligned_img/Phong/WIN_20241001_07_47_25_Pro_3.png +3 -0
  17. aligned_img/Phong/WIN_20241001_07_47_26_Pro.png +3 -0
  18. aligned_img/Phong/WIN_20241001_07_47_26_Pro_2.png +3 -0
  19. aligned_img/Phong/WIN_20241001_07_47_27_Pro.png +3 -0
  20. aligned_img/Phong/WIN_20241001_07_47_27_Pro_2.png +3 -0
  21. class/classifier.pkl +3 -0
  22. classifier.py +59 -0
  23. config.yaml +9 -0
  24. data_preprocess.py +10 -0
  25. detect_face.py +778 -0
  26. face_recognition.py +104 -0
  27. facenet.py +544 -0
  28. haarcascade_frontalface_default.xml +0 -0
  29. insert_new_faces.py +53 -0
  30. model/20180402-114759.pb +3 -0
  31. npy/det1.npy +3 -0
  32. npy/det2.npy +3 -0
  33. npy/det3.npy +3 -0
  34. preprocess.py +108 -0
  35. requirements.txt +5 -0
  36. train_img/Kien.zip +3 -0
  37. train_img/Phong.zip +3 -0
  38. train_main.py +17 -0
  39. utils.py +66 -0
HD.txt ADDED
File without changes
aligned_img/Phong/WIN_20241001_07_47_20_Pro_2.png ADDED

Git LFS Details

  • SHA256: ebae5619c2ea8bb610e51e01f17cb0ee75aa05b85a1dbc0fe31185d7df53f6f3
  • Pointer size: 130 Bytes
  • Size of remote file: 41 kB
aligned_img/Phong/WIN_20241001_07_47_21_Pro.png ADDED

Git LFS Details

  • SHA256: 847ef31529ae83ae55851cd4b7e9be984ad59cc92ba6abfebaa1dcae8af78d13
  • Pointer size: 130 Bytes
  • Size of remote file: 39.7 kB
aligned_img/Phong/WIN_20241001_07_47_21_Pro_2.png ADDED

Git LFS Details

  • SHA256: e249aa3fe4b90aca8e092151a977bc7016b0ef926225c86e61d9753c93b1748c
  • Pointer size: 130 Bytes
  • Size of remote file: 39.5 kB
aligned_img/Phong/WIN_20241001_07_47_21_Pro_3.png ADDED

Git LFS Details

  • SHA256: 73172039183367558af5a5f7648a0eb7c2e74367609ea4e6094574aba13057a7
  • Pointer size: 130 Bytes
  • Size of remote file: 39.8 kB
aligned_img/Phong/WIN_20241001_07_47_22_Pro.png ADDED

Git LFS Details

  • SHA256: cb4f41fdcbbe4815bffbb3b4d85c985bda90716acba622077206dda592397294
  • Pointer size: 130 Bytes
  • Size of remote file: 40.5 kB
aligned_img/Phong/WIN_20241001_07_47_22_Pro_2.png ADDED

Git LFS Details

  • SHA256: aa2799deb237356ebc151cd0b1cc6d6ef144c1c88507174ef49ef42530e33c56
  • Pointer size: 130 Bytes
  • Size of remote file: 41.6 kB
aligned_img/Phong/WIN_20241001_07_47_22_Pro_3.png ADDED

Git LFS Details

  • SHA256: 2c31d73afecd318043eb7410ced802d92a8c4ec129da49af4ddc7ca2854eda03
  • Pointer size: 130 Bytes
  • Size of remote file: 42.8 kB
aligned_img/Phong/WIN_20241001_07_47_23_Pro.png ADDED

Git LFS Details

  • SHA256: f4b4a3183a2b626fc47a98db26dd27f6ffa7845519dce2fd4d659367f5e89436
  • Pointer size: 130 Bytes
  • Size of remote file: 41.3 kB
aligned_img/Phong/WIN_20241001_07_47_23_Pro_2.png ADDED

Git LFS Details

  • SHA256: 24a362acfaa96d1ae438d4e3b440a4ebbe677a248b007e2d35f24294e00abe3e
  • Pointer size: 130 Bytes
  • Size of remote file: 44 kB
aligned_img/Phong/WIN_20241001_07_47_23_Pro_3.png ADDED

Git LFS Details

  • SHA256: 2196f5f08b4069e1ee82a1069c3971d7002cecd3055efa3eecf03f36accd3bd4
  • Pointer size: 130 Bytes
  • Size of remote file: 42.5 kB
aligned_img/Phong/WIN_20241001_07_47_24_Pro.png ADDED

Git LFS Details

  • SHA256: eee60c3b9be7a494e8d8ceb1e26053e3cdb3f5f955d4b3b5fa7b65e4230fe339
  • Pointer size: 130 Bytes
  • Size of remote file: 41.6 kB
aligned_img/Phong/WIN_20241001_07_47_24_Pro_2.png ADDED

Git LFS Details

  • SHA256: c043338690359f78960776f079d2cedf89f7b92ecff8a0c862856e2435df2f48
  • Pointer size: 130 Bytes
  • Size of remote file: 41.7 kB
aligned_img/Phong/WIN_20241001_07_47_25_Pro.png ADDED

Git LFS Details

  • SHA256: 11704a61964f59d92b60aa1239efafb8c028bcba51884eeca884652c87cc41e2
  • Pointer size: 130 Bytes
  • Size of remote file: 40.9 kB
aligned_img/Phong/WIN_20241001_07_47_25_Pro_2.png ADDED

Git LFS Details

  • SHA256: d1ebd4d366278ef225ff19f0c820bcc34777b33637f03c9598682362ce1cc3e2
  • Pointer size: 130 Bytes
  • Size of remote file: 41.3 kB
aligned_img/Phong/WIN_20241001_07_47_25_Pro_3.png ADDED

Git LFS Details

  • SHA256: 9657ccb74f435e396b2dbb74bedf71c2505b03cde374e2cf455c31465744a5de
  • Pointer size: 130 Bytes
  • Size of remote file: 41.2 kB
aligned_img/Phong/WIN_20241001_07_47_26_Pro.png ADDED

Git LFS Details

  • SHA256: f15edc59a4c1ccb3981dc6c49163d483c8dfa67e98555cdee88f6a0381dc73f5
  • Pointer size: 130 Bytes
  • Size of remote file: 42.3 kB
aligned_img/Phong/WIN_20241001_07_47_26_Pro_2.png ADDED

Git LFS Details

  • SHA256: 9dd15fb0d9d32940505c61b8854b52ccf2ffa4d45c73c06e334ab00094142789
  • Pointer size: 130 Bytes
  • Size of remote file: 41.9 kB
aligned_img/Phong/WIN_20241001_07_47_27_Pro.png ADDED

Git LFS Details

  • SHA256: f18eaf39dcb3173631be02fd1b9ac160218e378558675eaa69a2d5b1fdf2f34a
  • Pointer size: 130 Bytes
  • Size of remote file: 42.3 kB
aligned_img/Phong/WIN_20241001_07_47_27_Pro_2.png ADDED

Git LFS Details

  • SHA256: ffdfea28cda842de8ed89af25d88f78970ef38d35ec41ceb744f79aa723cd45e
  • Pointer size: 130 Bytes
  • Size of remote file: 42.8 kB
class/classifier.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0dd3eccdad8713fde86518b3160dd8fda4ca8ec8fd3b7321efe17220076321ba
+ size 511798
classifier.py ADDED
@@ -0,0 +1,59 @@
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+ import numpy as np
+ import facenet
+ import os
+ import math
+ import pickle
+ from sklearn.svm import SVC
+ import sys
+ import tensorflow.compat.v1 as tf
+
+ class training:
+     def __init__(self, datadir, modeldir, classifier_filename):
+         self.datadir = datadir
+         self.modeldir = modeldir
+         self.classifier_filename = classifier_filename
+
+     def main_train(self):
+         with tf.Graph().as_default():
+             with tf.Session() as sess:
+                 img_data = facenet.get_dataset(self.datadir)
+                 path, label = facenet.get_image_paths_and_labels(img_data)
+                 print('Classes: %d' % len(img_data))
+                 print('Images: %d' % len(path))
+
+                 facenet.load_model(self.modeldir)
+                 images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
+                 embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
+                 phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
+                 embedding_size = embeddings.get_shape()[1]
+
+                 print('Extracting features of images for model')
+                 batch_size = 256
+                 image_size = 160
+                 nrof_images = len(path)
+                 nrof_batches_per_epoch = int(math.ceil(1.0 * nrof_images / batch_size))
+                 emb_array = np.zeros((nrof_images, embedding_size))
+                 for i in range(nrof_batches_per_epoch):
+                     start_index = i * batch_size
+                     end_index = min((i + 1) * batch_size, nrof_images)
+                     paths_batch = path[start_index:end_index]
+                     images = facenet.load_data(paths_batch, False, False, image_size)
+                     feed_dict = {images_placeholder: images, phase_train_placeholder: False}
+                     emb_array[start_index:end_index, :] = sess.run(embeddings, feed_dict=feed_dict)
+
+                 classifier_file_name = os.path.expanduser(self.classifier_filename)
+
+                 # Train a linear SVM on the extracted embeddings
+                 print('Training Started')
+                 model = SVC(kernel='linear', probability=True)
+                 model.fit(emb_array, label)
+
+                 class_names = [cls.name.replace('_', ' ') for cls in img_data]
+
+                 # Save the trained classifier together with the class names
+                 with open(classifier_file_name, 'wb') as outfile:
+                     pickle.dump((model, class_names), outfile)
+                 return classifier_file_name
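For context, train_main.py (listed above but not shown in this view) is presumably the entry point that drives this class; a minimal, hypothetical invocation using the paths from config.yaml would look like:

    from classifier import training

    # Hypothetical usage; the paths mirror config.yaml, and './aligned_img'
    # assumes the classifier is trained on the MTCNN-aligned crops.
    obj = training('./aligned_img', './model/20180402-114759.pb', './class/classifier.pkl')
    saved = obj.main_train()
    print('Classifier saved to', saved)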
config.yaml ADDED
@@ -0,0 +1,9 @@
+ PATH:
+   MODEL_DIR: './model/20180402-114759.pb'
+   CLASSIFIER_DIR: './class/classifier.pkl'
+   NPY_DIR: './npy'
+   TRAIN_IMG_DIR: './train_img'
+
+ INFO:
+   PICTURE_PROMPT: 'This app recognizes faces in an uploaded picture. To use it, simply press "Browse files" and upload the pictures you want to recognize.'
+   WEBCAM_PROMPT: 'This app recognizes faces in a live video stream. To use it, simply press start and allow access to your webcam.'
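None of the diffs shown here read this file directly, but a standard PyYAML loader (an assumption — utils.py may already provide an equivalent helper) would be:

    import yaml  # PyYAML

    with open('config.yaml') as f:
        cfg = yaml.safe_load(f)

    model_dir = cfg['PATH']['MODEL_DIR']            # './model/20180402-114759.pb'
    classifier_dir = cfg['PATH']['CLASSIFIER_DIR']  # './class/classifier.pkl'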
data_preprocess.py ADDED
@@ -0,0 +1,10 @@
+ from preprocess import preprocesses
+
+ input_datadir = './train_img'
+ output_datadir = './aligned_img'
+
+ obj = preprocesses(input_datadir, output_datadir)
+ nrof_images_total, nrof_successfully_aligned = obj.collect_data()
+
+ print('Total number of images: %d' % nrof_images_total)
+ print('Number of successfully aligned images: %d' % nrof_successfully_aligned)
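Judging by the aligned_img/Phong/*.png paths in the file list and by facenet.get_dataset further below, both directories are expected to hold one subdirectory per person, e.g.:

    train_img/
        Kien/
            WIN_...png
        Phong/
            WIN_...png

(The Kien.zip and Phong.zip archives in train_img/ presumably need to be extracted into such per-person folders first; that is an assumption.)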
detect_face.py ADDED
@@ -0,0 +1,778 @@
+ """ Tensorflow implementation of the face detection / alignment algorithm found at
+ https://github.com/kpzhang93/MTCNN_face_detection_alignment
+ """
+ # MIT License
+ #
+ # Copyright (c) 2016 David Sandberg
+ #
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+ #
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+ #
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+ from six import string_types, iteritems
+
+ import numpy as np
+ import tensorflow.compat.v1 as tf
+ #from math import floor
+ import cv2
+ import os
+
+ def layer(op):
+     '''Decorator for composable network layers.'''
+
+     def layer_decorated(self, *args, **kwargs):
+         # Automatically set a name if not provided.
+         name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
+         # Figure out the layer inputs.
+         if len(self.terminals) == 0:
+             raise RuntimeError('No input variables found for layer %s.' % name)
+         elif len(self.terminals) == 1:
+             layer_input = self.terminals[0]
+         else:
+             layer_input = list(self.terminals)
+         # Perform the operation and get the output.
+         layer_output = op(self, layer_input, *args, **kwargs)
+         # Add to layer LUT.
+         self.layers[name] = layer_output
+         # This output is now the input for the next layer.
+         self.feed(layer_output)
+         # Return self for chained calls.
+         return self
+
+     return layer_decorated
+
+ class Network(object):
+
+     def __init__(self, inputs, trainable=True):
+         # The input nodes for this network
+         self.inputs = inputs
+         # The current list of terminal nodes
+         self.terminals = []
+         # Mapping from layer names to layers
+         self.layers = dict(inputs)
+         # If true, the resulting variables are set as trainable
+         self.trainable = trainable
+
+         self.setup()
+
+     def setup(self):
+         '''Construct the network. '''
+         raise NotImplementedError('Must be implemented by the subclass.')
+
+     def load(self, data_path, session, ignore_missing=False):
+         '''Load network weights.
+         data_path: The path to the numpy-serialized network weights
+         session: The current TensorFlow session
+         ignore_missing: If true, serialized weights for missing layers are ignored.
+         '''
+         data_dict = np.load(data_path, allow_pickle=True, encoding='latin1').item() #pylint: disable=no-member
+
+         for op_name in data_dict:
+             with tf.variable_scope(op_name, reuse=True):
+                 for param_name, data in iteritems(data_dict[op_name]):
+                     try:
+                         var = tf.get_variable(param_name)
+                         session.run(var.assign(data))
+                     except ValueError:
+                         if not ignore_missing:
+                             raise
+
+     def feed(self, *args):
+         '''Set the input(s) for the next operation by replacing the terminal nodes.
+         The arguments can be either layer names or the actual layers.
+         '''
+         assert len(args) != 0
+         self.terminals = []
+         for fed_layer in args:
+             if isinstance(fed_layer, string_types):
+                 try:
+                     fed_layer = self.layers[fed_layer]
+                 except KeyError:
+                     raise KeyError('Unknown layer name fed: %s' % fed_layer)
+             self.terminals.append(fed_layer)
+         return self
+
+     def get_output(self):
+         '''Returns the current network output.'''
+         return self.terminals[-1]
+
+     def get_unique_name(self, prefix):
+         '''Returns an index-suffixed unique name for the given prefix.
+         This is used for auto-generating layer names based on the type-prefix.
+         '''
+         ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
+         return '%s_%d' % (prefix, ident)
+
+     def make_var(self, name, shape):
+         '''Creates a new TensorFlow variable.'''
+         return tf.get_variable(name, shape, trainable=self.trainable)
+
+     def validate_padding(self, padding):
+         '''Verifies that the padding is one of the supported ones.'''
+         assert padding in ('SAME', 'VALID')
+
+     @layer
+     def conv(self,
+              inp,
+              k_h,
+              k_w,
+              c_o,
+              s_h,
+              s_w,
+              name,
+              relu=True,
+              padding='SAME',
+              group=1,
+              biased=True):
+         # Verify that the padding is acceptable
+         self.validate_padding(padding)
+         # Get the number of channels in the input
+         c_i = int(inp.get_shape()[-1])
+         # Verify that the grouping parameter is valid
+         assert c_i % group == 0
+         assert c_o % group == 0
+         # Convolution for a given input and kernel
+         convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
+         with tf.variable_scope(name) as scope:
+             kernel = self.make_var('weights', shape=[k_h, k_w, c_i // group, c_o])
+             # This is the common-case. Convolve the input without any further complications.
+             output = convolve(inp, kernel)
+             # Add the biases
+             if biased:
+                 biases = self.make_var('biases', [c_o])
+                 output = tf.nn.bias_add(output, biases)
+             if relu:
+                 # ReLU non-linearity
+                 output = tf.nn.relu(output, name=scope.name)
+             return output
+
+     @layer
+     def prelu(self, inp, name):
+         with tf.variable_scope(name):
+             i = int(inp.get_shape()[-1])
+             alpha = self.make_var('alpha', shape=(i,))
+             output = tf.nn.relu(inp) + tf.multiply(alpha, -tf.nn.relu(-inp))
+         return output
+
+     @layer
+     def max_pool(self, inp, k_h, k_w, s_h, s_w, name, padding='SAME'):
+         self.validate_padding(padding)
+         return tf.nn.max_pool(inp,
+                               ksize=[1, k_h, k_w, 1],
+                               strides=[1, s_h, s_w, 1],
+                               padding=padding,
+                               name=name)
+
+     @layer
+     def fc(self, inp, num_out, name, relu=True):
+         with tf.variable_scope(name):
+             input_shape = inp.get_shape()
+             if input_shape.ndims == 4:
+                 # The input is spatial. Vectorize it first.
+                 dim = 1
+                 for d in input_shape[1:].as_list():
+                     dim *= int(d)
+                 feed_in = tf.reshape(inp, [-1, dim])
+             else:
+                 feed_in, dim = (inp, input_shape.as_list()[-1])
+             weights = self.make_var('weights', shape=[dim, num_out])
+             biases = self.make_var('biases', [num_out])
+             op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
+             fc = op(feed_in, weights, biases, name=name)
+             return fc
+
+
+     """
+     Multi dimensional softmax,
+     refer to https://github.com/tensorflow/tensorflow/issues/210
+     compute softmax along the dimension of target
+     the native softmax only supports batch_size x dimension
+     """
+     @layer
+     def softmax(self, target, axis, name=None):
+         max_axis = tf.reduce_max(target, axis, keep_dims=True)
+         target_exp = tf.exp(target - max_axis)
+         normalize = tf.reduce_sum(target_exp, axis, keep_dims=True)
+         softmax = tf.div(target_exp, normalize, name)
+         return softmax
+
+ class PNet(Network):
+     def setup(self):
+         (self.feed('data') #pylint: disable=no-value-for-parameter, no-member
+              .conv(3, 3, 10, 1, 1, padding='VALID', relu=False, name='conv1')
+              .prelu(name='PReLU1')
+              .max_pool(2, 2, 2, 2, name='pool1')
+              .conv(3, 3, 16, 1, 1, padding='VALID', relu=False, name='conv2')
+              .prelu(name='PReLU2')
+              .conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv3')
+              .prelu(name='PReLU3')
+              .conv(1, 1, 2, 1, 1, relu=False, name='conv4-1')
+              .softmax(3, name='prob1'))
+
+         (self.feed('PReLU3') #pylint: disable=no-value-for-parameter
+              .conv(1, 1, 4, 1, 1, relu=False, name='conv4-2'))
+
+ class RNet(Network):
+     def setup(self):
+         (self.feed('data') #pylint: disable=no-value-for-parameter, no-member
+              .conv(3, 3, 28, 1, 1, padding='VALID', relu=False, name='conv1')
+              .prelu(name='prelu1')
+              .max_pool(3, 3, 2, 2, name='pool1')
+              .conv(3, 3, 48, 1, 1, padding='VALID', relu=False, name='conv2')
+              .prelu(name='prelu2')
+              .max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
+              .conv(2, 2, 64, 1, 1, padding='VALID', relu=False, name='conv3')
+              .prelu(name='prelu3')
+              .fc(128, relu=False, name='conv4')
+              .prelu(name='prelu4')
+              .fc(2, relu=False, name='conv5-1')
+              .softmax(1, name='prob1'))
+
+         (self.feed('prelu4') #pylint: disable=no-value-for-parameter
+              .fc(4, relu=False, name='conv5-2'))
+
+ class ONet(Network):
+     def setup(self):
+         (self.feed('data') #pylint: disable=no-value-for-parameter, no-member
+              .conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv1')
+              .prelu(name='prelu1')
+              .max_pool(3, 3, 2, 2, name='pool1')
+              .conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv2')
+              .prelu(name='prelu2')
+              .max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
+              .conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv3')
+              .prelu(name='prelu3')
+              .max_pool(2, 2, 2, 2, name='pool3')
+              .conv(2, 2, 128, 1, 1, padding='VALID', relu=False, name='conv4')
+              .prelu(name='prelu4')
+              .fc(256, relu=False, name='conv5')
+              .prelu(name='prelu5')
+              .fc(2, relu=False, name='conv6-1')
+              .softmax(1, name='prob1'))
+
+         (self.feed('prelu5') #pylint: disable=no-value-for-parameter
+              .fc(4, relu=False, name='conv6-2'))
+
+         (self.feed('prelu5') #pylint: disable=no-value-for-parameter
+              .fc(10, relu=False, name='conv6-3'))
+
+ def create_mtcnn(sess, model_path):
+     if not model_path:
+         model_path, _ = os.path.split(os.path.realpath(__file__))
+
+     with tf.variable_scope('pnet'):
+         data = tf.placeholder(tf.float32, (None, None, None, 3), 'input')
+         pnet = PNet({'data': data})
+         pnet.load(os.path.join(model_path, 'det1.npy'), sess)
+     with tf.variable_scope('rnet'):
+         data = tf.placeholder(tf.float32, (None, 24, 24, 3), 'input')
+         rnet = RNet({'data': data})
+         rnet.load(os.path.join(model_path, 'det2.npy'), sess)
+     with tf.variable_scope('onet'):
+         data = tf.placeholder(tf.float32, (None, 48, 48, 3), 'input')
+         onet = ONet({'data': data})
+         onet.load(os.path.join(model_path, 'det3.npy'), sess)
+
+     pnet_fun = lambda img: sess.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0': img})
+     rnet_fun = lambda img: sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0': img})
+     onet_fun = lambda img: sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'), feed_dict={'onet/input:0': img})
+     return pnet_fun, rnet_fun, onet_fun
+
+ def detect_face(img, minsize, pnet, rnet, onet, threshold, factor):
+     # img: input image
+     # minsize: minimum face size
+     # pnet, rnet, onet: the three MTCNN stage networks
+     # threshold: [th1, th2, th3], the per-stage score thresholds
+     # factor: scale factor for the image pyramid
+     factor_count = 0
+     total_boxes = np.empty((0, 9))
+     points = np.empty(0)
+     h = img.shape[0]
+     w = img.shape[1]
+     minl = np.amin([h, w])
+     m = 12.0 / minsize
+     minl = minl * m
+     # create scale pyramid
+     scales = []
+     while minl >= 12:
+         scales += [m * np.power(factor, factor_count)]
+         minl = minl * factor
+         factor_count += 1
+
+     # first stage
+     for j in range(len(scales)):
+         scale = scales[j]
+         hs = int(np.ceil(h * scale))
+         ws = int(np.ceil(w * scale))
+         im_data = imresample(img, (hs, ws))
+         im_data = (im_data - 127.5) * 0.0078125
+         img_x = np.expand_dims(im_data, 0)
+         img_y = np.transpose(img_x, (0, 2, 1, 3))
+         out = pnet(img_y)
+         out0 = np.transpose(out[0], (0, 2, 1, 3))
+         out1 = np.transpose(out[1], (0, 2, 1, 3))
+
+         boxes, _ = generateBoundingBox(out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, threshold[0])
+
+         # inter-scale nms
+         pick = nms(boxes.copy(), 0.5, 'Union')
+         if boxes.size > 0 and pick.size > 0:
+             boxes = boxes[pick, :]
+             total_boxes = np.append(total_boxes, boxes, axis=0)
+
+     numbox = total_boxes.shape[0]
+     if numbox > 0:
+         pick = nms(total_boxes.copy(), 0.7, 'Union')
+         total_boxes = total_boxes[pick, :]
+         regw = total_boxes[:, 2] - total_boxes[:, 0]
+         regh = total_boxes[:, 3] - total_boxes[:, 1]
+         qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
+         qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
+         qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
+         qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
+         total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))
+         total_boxes = rerec(total_boxes.copy())
+         total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)
+         dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
+
+     numbox = total_boxes.shape[0]
+     if numbox > 0:
+         # second stage
+         tempimg = np.zeros((24, 24, 3, numbox))
+         for k in range(0, numbox):
+             tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
+             tmp[dy[k]-1:edy[k], dx[k]-1:edx[k], :] = img[y[k]-1:ey[k], x[k]-1:ex[k], :]
+             if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
+                 tempimg[:, :, :, k] = imresample(tmp, (24, 24))
+             else:
+                 return np.empty(0)  # np.empty() without a shape raises TypeError
+         tempimg = (tempimg - 127.5) * 0.0078125
+         tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
+         out = rnet(tempimg1)
+         out0 = np.transpose(out[0])
+         out1 = np.transpose(out[1])
+         score = out1[1, :]
+         ipass = np.where(score > threshold[1])
+         total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
+         mv = out0[:, ipass[0]]
+         if total_boxes.shape[0] > 0:
+             pick = nms(total_boxes, 0.7, 'Union')
+             total_boxes = total_boxes[pick, :]
+             total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))
+             total_boxes = rerec(total_boxes.copy())
+
+     numbox = total_boxes.shape[0]
+     if numbox > 0:
+         # third stage
+         total_boxes = np.fix(total_boxes).astype(np.int32)
+         dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
+         tempimg = np.zeros((48, 48, 3, numbox))
+         for k in range(0, numbox):
+             tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
+             tmp[dy[k]-1:edy[k], dx[k]-1:edx[k], :] = img[y[k]-1:ey[k], x[k]-1:ex[k], :]
+             if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
+                 tempimg[:, :, :, k] = imresample(tmp, (48, 48))
+             else:
+                 return np.empty(0)
+         tempimg = (tempimg - 127.5) * 0.0078125
+         tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
+         out = onet(tempimg1)
+         out0 = np.transpose(out[0])
+         out1 = np.transpose(out[1])
+         out2 = np.transpose(out[2])
+         score = out2[1, :]
+         points = out1
+         ipass = np.where(score > threshold[2])
+         points = points[:, ipass[0]]
+         total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
+         mv = out0[:, ipass[0]]
+
+         w = total_boxes[:, 2] - total_boxes[:, 0] + 1
+         h = total_boxes[:, 3] - total_boxes[:, 1] + 1
+         points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1
+         points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1
+         if total_boxes.shape[0] > 0:
+             total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))
+             pick = nms(total_boxes.copy(), 0.7, 'Min')
+             total_boxes = total_boxes[pick, :]
+             points = points[:, pick]
+
+     return total_boxes, points
+
+
+ def bulk_detect_face(images, detection_window_size_ratio, pnet, rnet, onet, threshold, factor):
+     # images: list of input images
+     # detection_window_size_ratio: minimum face size as a fraction of the smaller image dimension
+     # pnet, rnet, onet: the three MTCNN stage networks
+     # threshold: [th1, th2, th3], the per-stage score thresholds in [0, 1]
+
+     all_scales = [None] * len(images)
+     images_with_boxes = [None] * len(images)
+
+     for i in range(len(images)):
+         images_with_boxes[i] = {'total_boxes': np.empty((0, 9))}
+
+     # create scale pyramid
+     for index, img in enumerate(images):
+         all_scales[index] = []
+         h = img.shape[0]
+         w = img.shape[1]
+         minsize = int(detection_window_size_ratio * np.minimum(w, h))
+         factor_count = 0
+         minl = np.amin([h, w])
+         if minsize <= 12:
+             minsize = 12
+
+         m = 12.0 / minsize
+         minl = minl * m
+         while minl >= 12:
+             all_scales[index].append(m * np.power(factor, factor_count))
+             minl = minl * factor
+             factor_count += 1
+
+     # # # # # # # # # # # # #
+     # first stage - fast proposal network (pnet) to obtain face candidates
+     # # # # # # # # # # # # #
+
+     images_obj_per_resolution = {}
+
+     # TODO: round to a multiple of 8 to increase the probability that pyramid images will have the same resolution across input images
+
+     for index, scales in enumerate(all_scales):
+         h = images[index].shape[0]
+         w = images[index].shape[1]
+
+         for scale in scales:
+             hs = int(np.ceil(h * scale))
+             ws = int(np.ceil(w * scale))
+
+             if (ws, hs) not in images_obj_per_resolution:
+                 images_obj_per_resolution[(ws, hs)] = []
+
+             im_data = imresample(images[index], (hs, ws))
+             im_data = (im_data - 127.5) * 0.0078125
+             img_y = np.transpose(im_data, (1, 0, 2))  # caffe uses different dimensions ordering
+             images_obj_per_resolution[(ws, hs)].append({'scale': scale, 'image': img_y, 'index': index})
+
+     for resolution in images_obj_per_resolution:
+         images_per_resolution = [i['image'] for i in images_obj_per_resolution[resolution]]
+         outs = pnet(images_per_resolution)
+
+         for index in range(len(outs[0])):
+             scale = images_obj_per_resolution[resolution][index]['scale']
+             image_index = images_obj_per_resolution[resolution][index]['index']
+             out0 = np.transpose(outs[0][index], (1, 0, 2))
+             out1 = np.transpose(outs[1][index], (1, 0, 2))
+
+             boxes, _ = generateBoundingBox(out1[:, :, 1].copy(), out0[:, :, :].copy(), scale, threshold[0])
+
+             # inter-scale nms
+             pick = nms(boxes.copy(), 0.5, 'Union')
+             if boxes.size > 0 and pick.size > 0:
+                 boxes = boxes[pick, :]
+                 images_with_boxes[image_index]['total_boxes'] = np.append(images_with_boxes[image_index]['total_boxes'],
+                                                                           boxes,
+                                                                           axis=0)
+
+     for index, image_obj in enumerate(images_with_boxes):
+         numbox = image_obj['total_boxes'].shape[0]
+         if numbox > 0:
+             h = images[index].shape[0]
+             w = images[index].shape[1]
+             pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Union')
+             image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
+             regw = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0]
+             regh = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1]
+             qq1 = image_obj['total_boxes'][:, 0] + image_obj['total_boxes'][:, 5] * regw
+             qq2 = image_obj['total_boxes'][:, 1] + image_obj['total_boxes'][:, 6] * regh
+             qq3 = image_obj['total_boxes'][:, 2] + image_obj['total_boxes'][:, 7] * regw
+             qq4 = image_obj['total_boxes'][:, 3] + image_obj['total_boxes'][:, 8] * regh
+             image_obj['total_boxes'] = np.transpose(np.vstack([qq1, qq2, qq3, qq4, image_obj['total_boxes'][:, 4]]))
+             image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy())
+             image_obj['total_boxes'][:, 0:4] = np.fix(image_obj['total_boxes'][:, 0:4]).astype(np.int32)
+             dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(image_obj['total_boxes'].copy(), w, h)
+
+             numbox = image_obj['total_boxes'].shape[0]
+             tempimg = np.zeros((24, 24, 3, numbox))
+
+             if numbox > 0:
+                 for k in range(0, numbox):
+                     tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
+                     tmp[dy[k]-1:edy[k], dx[k]-1:edx[k], :] = images[index][y[k]-1:ey[k], x[k]-1:ex[k], :]
+                     if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
+                         tempimg[:, :, :, k] = imresample(tmp, (24, 24))
+                     else:
+                         return np.empty(0)
+
+                 tempimg = (tempimg - 127.5) * 0.0078125
+                 image_obj['rnet_input'] = np.transpose(tempimg, (3, 1, 0, 2))
+
+     # # # # # # # # # # # # #
+     # second stage - refinement of face candidates with rnet
+     # # # # # # # # # # # # #
+
+     bulk_rnet_input = np.empty((0, 24, 24, 3))
+     for index, image_obj in enumerate(images_with_boxes):
+         if 'rnet_input' in image_obj:
+             bulk_rnet_input = np.append(bulk_rnet_input, image_obj['rnet_input'], axis=0)
+
+     out = rnet(bulk_rnet_input)
+     out0 = np.transpose(out[0])
+     out1 = np.transpose(out[1])
+     score = out1[1, :]
+
+     i = 0
+     for index, image_obj in enumerate(images_with_boxes):
+         if 'rnet_input' not in image_obj:
+             continue
+
+         rnet_input_count = image_obj['rnet_input'].shape[0]
+         score_per_image = score[i:i + rnet_input_count]
+         out0_per_image = out0[:, i:i + rnet_input_count]
+
+         ipass = np.where(score_per_image > threshold[1])
+         image_obj['total_boxes'] = np.hstack([image_obj['total_boxes'][ipass[0], 0:4].copy(),
+                                               np.expand_dims(score_per_image[ipass].copy(), 1)])
+
+         mv = out0_per_image[:, ipass[0]]
+
+         if image_obj['total_boxes'].shape[0] > 0:
+             h = images[index].shape[0]
+             w = images[index].shape[1]
+             pick = nms(image_obj['total_boxes'], 0.7, 'Union')
+             image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
+             image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv[:, pick]))
+             image_obj['total_boxes'] = rerec(image_obj['total_boxes'].copy())
+
+             numbox = image_obj['total_boxes'].shape[0]
+
+             if numbox > 0:
+                 tempimg = np.zeros((48, 48, 3, numbox))
+                 image_obj['total_boxes'] = np.fix(image_obj['total_boxes']).astype(np.int32)
+                 dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(image_obj['total_boxes'].copy(), w, h)
+
+                 for k in range(0, numbox):
+                     tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
+                     tmp[dy[k]-1:edy[k], dx[k]-1:edx[k], :] = images[index][y[k]-1:ey[k], x[k]-1:ex[k], :]
+                     if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
+                         tempimg[:, :, :, k] = imresample(tmp, (48, 48))
+                     else:
+                         return np.empty(0)
+                 tempimg = (tempimg - 127.5) * 0.0078125
+                 image_obj['onet_input'] = np.transpose(tempimg, (3, 1, 0, 2))
+
+         i += rnet_input_count
+
+     # # # # # # # # # # # # #
+     # third stage - further refinement and facial landmarks positions with onet
+     # # # # # # # # # # # # #
+
+     bulk_onet_input = np.empty((0, 48, 48, 3))
+     for index, image_obj in enumerate(images_with_boxes):
+         if 'onet_input' in image_obj:
+             bulk_onet_input = np.append(bulk_onet_input, image_obj['onet_input'], axis=0)
+
+     out = onet(bulk_onet_input)
+
+     out0 = np.transpose(out[0])
+     out1 = np.transpose(out[1])
+     out2 = np.transpose(out[2])
+     score = out2[1, :]
+     points = out1
+
+     i = 0
+     ret = []
+     for index, image_obj in enumerate(images_with_boxes):
+         if 'onet_input' not in image_obj:
+             ret.append(None)
+             continue
+
+         onet_input_count = image_obj['onet_input'].shape[0]
+
+         out0_per_image = out0[:, i:i + onet_input_count]
+         score_per_image = score[i:i + onet_input_count]
+         points_per_image = points[:, i:i + onet_input_count]
+
+         ipass = np.where(score_per_image > threshold[2])
+         points_per_image = points_per_image[:, ipass[0]]
+
+         image_obj['total_boxes'] = np.hstack([image_obj['total_boxes'][ipass[0], 0:4].copy(),
+                                               np.expand_dims(score_per_image[ipass].copy(), 1)])
+         mv = out0_per_image[:, ipass[0]]
+
+         w = image_obj['total_boxes'][:, 2] - image_obj['total_boxes'][:, 0] + 1
+         h = image_obj['total_boxes'][:, 3] - image_obj['total_boxes'][:, 1] + 1
+         points_per_image[0:5, :] = np.tile(w, (5, 1)) * points_per_image[0:5, :] + np.tile(
+             image_obj['total_boxes'][:, 0], (5, 1)) - 1
+         points_per_image[5:10, :] = np.tile(h, (5, 1)) * points_per_image[5:10, :] + np.tile(
+             image_obj['total_boxes'][:, 1], (5, 1)) - 1
+
+         if image_obj['total_boxes'].shape[0] > 0:
+             image_obj['total_boxes'] = bbreg(image_obj['total_boxes'].copy(), np.transpose(mv))
+             pick = nms(image_obj['total_boxes'].copy(), 0.7, 'Min')
+             image_obj['total_boxes'] = image_obj['total_boxes'][pick, :]
+             points_per_image = points_per_image[:, pick]
+
+             ret.append((image_obj['total_boxes'], points_per_image))
+         else:
+             ret.append(None)
+
+         i += onet_input_count
+
+     return ret
+
+
+ # function [boundingbox] = bbreg(boundingbox,reg)
+ def bbreg(boundingbox, reg):
+     # calibrate bounding boxes
+     if reg.shape[1] == 1:
+         reg = np.reshape(reg, (reg.shape[2], reg.shape[3]))
+
+     w = boundingbox[:, 2] - boundingbox[:, 0] + 1
+     h = boundingbox[:, 3] - boundingbox[:, 1] + 1
+     b1 = boundingbox[:, 0] + reg[:, 0] * w
+     b2 = boundingbox[:, 1] + reg[:, 1] * h
+     b3 = boundingbox[:, 2] + reg[:, 2] * w
+     b4 = boundingbox[:, 3] + reg[:, 3] * h
+     boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4]))
+     return boundingbox
+
+ def generateBoundingBox(imap, reg, scale, t):
+     # use heatmap to generate bounding boxes
+     stride = 2
+     cellsize = 12
+
+     imap = np.transpose(imap)
+     dx1 = np.transpose(reg[:, :, 0])
+     dy1 = np.transpose(reg[:, :, 1])
+     dx2 = np.transpose(reg[:, :, 2])
+     dy2 = np.transpose(reg[:, :, 3])
+     y, x = np.where(imap >= t)
+     if y.shape[0] == 1:
+         dx1 = np.flipud(dx1)
+         dy1 = np.flipud(dy1)
+         dx2 = np.flipud(dx2)
+         dy2 = np.flipud(dy2)
+     score = imap[(y, x)]
+     reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]]))
+     if reg.size == 0:
+         reg = np.empty((0, 3))
+     bb = np.transpose(np.vstack([y, x]))
+     q1 = np.fix((stride * bb + 1) / scale)
+     q2 = np.fix((stride * bb + cellsize - 1 + 1) / scale)
+     boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg])
+     return boundingbox, reg
+
+ # function pick = nms(boxes,threshold,type)
+ def nms(boxes, threshold, method):
+     if boxes.size == 0:
+         return np.empty((0, 3))
+     x1 = boxes[:, 0]
+     y1 = boxes[:, 1]
+     x2 = boxes[:, 2]
+     y2 = boxes[:, 3]
+     s = boxes[:, 4]
+     area = (x2 - x1 + 1) * (y2 - y1 + 1)
+     I = np.argsort(s)
+     pick = np.zeros_like(s, dtype=np.int16)
+     counter = 0
+     while I.size > 0:
+         i = I[-1]
+         pick[counter] = i
+         counter += 1
+         idx = I[0:-1]
+         xx1 = np.maximum(x1[i], x1[idx])
+         yy1 = np.maximum(y1[i], y1[idx])
+         xx2 = np.minimum(x2[i], x2[idx])
+         yy2 = np.minimum(y2[i], y2[idx])
+         w = np.maximum(0.0, xx2 - xx1 + 1)
+         h = np.maximum(0.0, yy2 - yy1 + 1)
+         inter = w * h
+         if method == 'Min':  # was `is 'Min'`; identity comparison on strings is unreliable
+             o = inter / np.minimum(area[i], area[idx])
+         else:
+             o = inter / (area[i] + area[idx] - inter)
+         I = I[np.where(o <= threshold)]
+     pick = pick[0:counter]
+     return pick
+
+ # function [dy edy dx edx y ey x ex tmpw tmph] = pad(total_boxes,w,h)
+ def pad(total_boxes, w, h):
+     # compute the padding coordinates (pad the bounding boxes to square)
+     tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32)
+     tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32)
+     numbox = total_boxes.shape[0]
+
+     dx = np.ones((numbox), dtype=np.int32)
+     dy = np.ones((numbox), dtype=np.int32)
+     edx = tmpw.copy().astype(np.int32)
+     edy = tmph.copy().astype(np.int32)
+
+     x = total_boxes[:, 0].copy().astype(np.int32)
+     y = total_boxes[:, 1].copy().astype(np.int32)
+     ex = total_boxes[:, 2].copy().astype(np.int32)
+     ey = total_boxes[:, 3].copy().astype(np.int32)
+
+     tmp = np.where(ex > w)
+     edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1)
+     ex[tmp] = w
+
+     tmp = np.where(ey > h)
+     edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1)
+     ey[tmp] = h
+
+     tmp = np.where(x < 1)
+     dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1)
+     x[tmp] = 1
+
+     tmp = np.where(y < 1)
+     dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1)
+     y[tmp] = 1
+
+     return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph
+
+ # function [bboxA] = rerec(bboxA)
+ def rerec(bboxA):
+     # convert bboxA to square
+     h = bboxA[:, 3] - bboxA[:, 1]
+     w = bboxA[:, 2] - bboxA[:, 0]
+     l = np.maximum(w, h)
+     bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5
+     bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5
+     bboxA[:, 2:4] = bboxA[:, 0:2] + np.transpose(np.tile(l, (2, 1)))
+     return bboxA
+
+ def imresample(img, sz):
+     im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA)  #@UndefinedVariable
+     return im_data
+
+ # This method is kept for debugging purpose
+ #     h=img.shape[0]
+ #     w=img.shape[1]
+ #     hs, ws = sz
+ #     dx = float(w) / ws
+ #     dy = float(h) / hs
+ #     im_data = np.zeros((hs,ws,3))
+ #     for a1 in range(0,hs):
+ #         for a2 in range(0,ws):
+ #             for a3 in range(0,3):
+ #                 im_data[a1,a2,a3] = img[int(floor(a1*dy)),int(floor(a2*dx)),a3]
+ #     return im_data
+
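For orientation, the three callables returned by create_mtcnn plug straight into detect_face. A minimal sketch on a single still image (the image path is hypothetical; the parameter values mirror the defaults used in face_recognition.py below):

    import cv2
    import tensorflow.compat.v1 as tf
    import detect_face

    with tf.Graph().as_default():
        with tf.Session() as sess:
            # Load the PNet/RNet/ONet weights from the det*.npy files in ./npy
            pnet, rnet, onet = detect_face.create_mtcnn(sess, './npy')
            img = cv2.imread('some_image.png')  # hypothetical test image
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            boxes, points = detect_face.detect_face(img, 30, pnet, rnet, onet,
                                                    [0.7, 0.8, 0.8], 0.709)
            # boxes: one row per face, [x1, y1, x2, y2, score]
            # points: 10 landmark coordinates (5 x, then 5 y) per face
            print('Detected %d face(s)' % boxes.shape[0])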
face_recognition.py ADDED
@@ -0,0 +1,104 @@
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+ import cv2
+ import numpy as np
+ import facenet
+ import detect_face
+ import os
+ import time
+ import pickle
+ from PIL import Image
+ import tensorflow.compat.v1 as tf
+
+ source = 0
+ modeldir = './model/20180402-114759.pb'
+ classifier_filename = './class/classifier.pkl'
+ npy = './npy'
+ train_img = './train_img'
+ with tf.Graph().as_default():
+     gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
+     sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
+     with sess.as_default():
+         pnet, rnet, onet = detect_face.create_mtcnn(sess, npy)
+         minsize = 30  # minimum size of face
+         threshold = [0.7, 0.8, 0.8]  # three stages' thresholds
+         factor = 0.709  # scale factor
+         margin = 44
+         batch_size = 100
+         image_size = 182
+         input_image_size = 160
+         HumanNames = os.listdir(train_img)
+         HumanNames.sort()
+         print('Loading Model')
+         facenet.load_model(modeldir)
+         images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
+         embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
+         phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
+         embedding_size = embeddings.get_shape()[1]
+         classifier_filename_exp = os.path.expanduser(classifier_filename)
+         with open(classifier_filename_exp, 'rb') as infile:
+             (model, class_names) = pickle.load(infile, encoding='latin1')
+
+         video_capture = cv2.VideoCapture(source)
+         print('Start Recognition')
+         while True:
+             ret, frame = video_capture.read()
+             if not ret:
+                 break  # camera read failed; frame would be None below
+             #frame = cv2.resize(frame, (0,0), fx=0.5, fy=0.5)  # resize frame (optional)
+             # timer = time.time()
+             if frame.ndim == 2:
+                 frame = facenet.to_rgb(frame)
+             bounding_boxes, _ = detect_face.detect_face(frame, minsize, pnet, rnet, onet, threshold, factor)
+             faceNum = bounding_boxes.shape[0]
+             if faceNum > 0:
+                 det = bounding_boxes[:, 0:4]
+                 img_size = np.asarray(frame.shape)[0:2]
+                 cropped = []
+                 scaled = []
+                 scaled_reshape = []
+                 for i in range(faceNum):
+                     emb_array = np.zeros((1, embedding_size))
+                     x_min = int(det[i][0])
+                     y_min = int(det[i][1])
+                     x_max = int(det[i][2])
+                     y_max = int(det[i][3])
+                     try:
+                         # skip boxes that touch the frame border
+                         if x_min <= 0 or y_min <= 0 or x_max >= len(frame[0]) or y_max >= len(frame):
+                             print('Face is very close!')
+                             continue
+                         cropped.append(frame[y_min:y_max, x_min:x_max, :])
+                         cropped[i] = facenet.flip(cropped[i], False)
+                         scaled.append(np.array(Image.fromarray(cropped[i]).resize((image_size, image_size))))
+                         scaled[i] = cv2.resize(scaled[i], (input_image_size, input_image_size), interpolation=cv2.INTER_CUBIC)
+                         scaled[i] = facenet.prewhiten(scaled[i])
+                         scaled_reshape.append(scaled[i].reshape(-1, input_image_size, input_image_size, 3))
+                         feed_dict = {images_placeholder: scaled_reshape[i], phase_train_placeholder: False}
+                         emb_array[0, :] = sess.run(embeddings, feed_dict=feed_dict)
+                         predictions = model.predict_proba(emb_array)
+                         best_class_indices = np.argmax(predictions, axis=1)
+                         best_class_probabilities = predictions[np.arange(len(best_class_indices)), best_class_indices]
+
+                         if best_class_probabilities > 0.5:
+                             cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), (236, 0, 242), 2)  # box the face
+                             for H_i in HumanNames:
+                                 if HumanNames[best_class_indices[0]] == H_i:
+                                     result_names = HumanNames[best_class_indices[0]]
+                                     print("Predictions : [ name: {} , accuracy: {:.3f} ]".format(HumanNames[best_class_indices[0]], best_class_probabilities[0]))
+                                     # cv2.rectangle(frame, (x_min, y_min-20), (x_max, y_min-2), (0, 255, 255), -1)
+                                     cv2.putText(frame, result_names, (x_min, y_min-5), cv2.FONT_HERSHEY_COMPLEX_SMALL,
+                                                 1, (236, 0, 242), thickness=1, lineType=1)
+                         else:
+                             cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), (236, 0, 242), 2)
+                             # cv2.rectangle(frame, (x_min, y_min-20), (x_max, y_min-2), (0, 255, 255), -1)
+                             cv2.putText(frame, "????????", (x_min, y_min-5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (236, 0, 242), thickness=1, lineType=1)
+                     except Exception as ex:
+                         print("An error occurred:", str(ex))
+
+             cv2.imshow('Face Recognition', frame)
+             key = cv2.waitKey(1)
+             if key == 113:  # "q"
+                 break
+         video_capture.release()
+         cv2.destroyAllWindows()
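The per-face decision above boils down to a thresholded argmax; a standalone sketch of the same rule, where emb is a (1, 512) embedding from the FaceNet graph (512 being the embedding size of the 20180402-114759 model):

    predictions = model.predict_proba(emb)            # shape (1, n_classes)
    best = int(np.argmax(predictions, axis=1)[0])     # index of the most likely person
    confidence = predictions[0, best]
    name = class_names[best] if confidence > 0.5 else 'unknown'  # 0.5 cutoff as in the script

Note that the script labels faces with HumanNames (the sorted listing of train_img) rather than the class_names stored in the pickle; the two orderings must match for the printed name to be correct.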
facenet.py ADDED
@@ -0,0 +1,544 @@
+ """Functions for building the face recognition network.
+ """
+ # MIT License
+ #
+ # Copyright (c) 2016 David Sandberg
+ #
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+ #
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+ #
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+
+ # pylint: disable=missing-docstring
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+ import imageio
+ import os
+ from subprocess import Popen, PIPE
+ import tensorflow.compat.v1 as tf
+ from tensorflow.python.framework import ops
+ import numpy as np
+ from scipy import misc
+ from sklearn.model_selection import KFold
+ from scipy import interpolate
+ from tensorflow.python.training import training
+ import random
+ import re
+ from tensorflow.python.platform import gfile
+
+
+ def triplet_loss(anchor, positive, negative, alpha):
+     """Calculate the triplet loss according to the FaceNet paper
+
+     Args:
+       anchor: the embeddings for the anchor images.
+       positive: the embeddings for the positive images.
+       negative: the embeddings for the negative images.
+       alpha: the margin between positive and negative pair distances.
+
+     Returns:
+       the triplet loss according to the FaceNet paper as a float tensor.
+     """
+     with tf.variable_scope('triplet_loss'):
+         pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
+         neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
+
+         basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
+         loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
+
+     return loss
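In the FaceNet paper's notation, the block above computes

    L = mean_i [ max( ||f(a_i) - f(p_i)||^2 - ||f(a_i) - f(n_i)||^2 + alpha, 0 ) ]

so a triplet only contributes to the loss while the anchor-positive squared distance fails to undercut the anchor-negative one by at least the margin alpha.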
63
+
64
+ def decov_loss(xs):
65
+ """Decov loss as described in https://arxiv.org/pdf/1511.06068.pdf
66
+ 'Reducing Overfitting In Deep Networks by Decorrelating Representation'
67
+ """
68
+ x = tf.reshape(xs, [int(xs.get_shape()[0]), -1])
69
+ m = tf.reduce_mean(x, 0, True)
70
+ z = tf.expand_dims(x-m, 2)
71
+ corr = tf.reduce_mean(tf.matmul(z, tf.transpose(z, perm=[0,2,1])), 0)
72
+ corr_frob_sqr = tf.reduce_sum(tf.square(corr))
73
+ corr_diag_sqr = tf.reduce_sum(tf.square(tf.diag_part(corr)))
74
+ loss = 0.5*(corr_frob_sqr - corr_diag_sqr)
75
+ return loss
76
+
77
+ def center_loss(features, label, alfa, nrof_classes):
78
+ """Center loss based on the paper "A Discriminative Feature Learning Approach for Deep Face Recognition"
79
+ (http://ydwen.github.io/papers/WenECCV16.pdf)
80
+ """
81
+ nrof_features = features.get_shape()[1]
82
+ centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
83
+ initializer=tf.constant_initializer(0), trainable=False)
84
+ label = tf.reshape(label, [-1])
85
+ centers_batch = tf.gather(centers, label)
86
+ diff = (1 - alfa) * (centers_batch - features)
87
+ centers = tf.scatter_sub(centers, label, diff)
88
+ loss = tf.reduce_mean(tf.square(features - centers_batch))
89
+ return loss, centers
90
+
91
+ def get_image_paths_and_labels(dataset):
92
+ image_paths_flat = []
93
+ labels_flat = []
94
+ for i in range(len(dataset)):
95
+ image_paths_flat += dataset[i].image_paths
96
+ labels_flat += [i] * len(dataset[i].image_paths)
97
+ return image_paths_flat, labels_flat
98
+
99
+ def shuffle_examples(image_paths, labels):
100
+ shuffle_list = list(zip(image_paths, labels))
101
+ random.shuffle(shuffle_list)
102
+ image_paths_shuff, labels_shuff = zip(*shuffle_list)
103
+ return image_paths_shuff, labels_shuff
104
+
105
+ def read_images_from_disk(input_queue):
106
+ """Consumes a single filename and label as a ' '-delimited string.
107
+ Args:
108
+ filename_and_label_tensor: A scalar string tensor.
109
+ Returns:
110
+ Two tensors: the decoded image, and the string label.
111
+ """
112
+ label = input_queue[1]
113
+ file_contents = tf.read_file(input_queue[0])
114
+ example = tf.image.decode_png(file_contents, channels=3)
115
+ return example, label
116
+
117
+ def random_rotate_image(image):
118
+ angle = np.random.uniform(low=-10.0, high=10.0)
119
+ return misc.imrotate(image, angle, 'bicubic')
120
+
121
+ def read_and_augment_data(image_list, label_list, image_size, batch_size, max_nrof_epochs,
122
+ random_crop, random_flip, random_rotate, nrof_preprocess_threads, shuffle=True):
123
+
124
+ images = ops.convert_to_tensor(image_list, dtype=tf.string)
125
+ labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
126
+
127
+ # Makes an input queue
128
+ input_queue = tf.train.slice_input_producer([images, labels],
129
+ num_epochs=max_nrof_epochs, shuffle=shuffle)
130
+
131
+ images_and_labels = []
132
+ for _ in range(nrof_preprocess_threads):
133
+ image, label = read_images_from_disk(input_queue)
134
+ if random_rotate:
135
+ image = tf.py_func(random_rotate_image, [image], tf.uint8)
136
+ if random_crop:
137
+ image = tf.random_crop(image, [image_size, image_size, 3])
138
+ else:
139
+ image = tf.image.resize_image_with_crop_or_pad(image, image_size, image_size)
140
+ if random_flip:
141
+ image = tf.image.random_flip_left_right(image)
142
+ #pylint: disable=no-member
143
+ image.set_shape((image_size, image_size, 3))
144
+ image = tf.image.per_image_standardization(image)
145
+ images_and_labels.append([image, label])
146
+
147
+ image_batch, label_batch = tf.train.batch_join(
148
+ images_and_labels, batch_size=batch_size,
149
+ capacity=4 * nrof_preprocess_threads * batch_size,
150
+ allow_smaller_final_batch=True)
151
+
152
+ return image_batch, label_batch
153
+
154
+ def _add_loss_summaries(total_loss):
155
+ """Add summaries for losses.
156
+
157
+ Generates moving average for all losses and associated summaries for
158
+ visualizing the performance of the network.
159
+
160
+ Args:
161
+ total_loss: Total loss from loss().
162
+ Returns:
163
+ loss_averages_op: op for generating moving averages of losses.
164
+ """
165
+ # Compute the moving average of all individual losses and the total loss.
166
+ loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
167
+ losses = tf.get_collection('losses')
168
+ loss_averages_op = loss_averages.apply(losses + [total_loss])
169
+
170
+ # Attach a scalar summmary to all individual losses and the total loss; do the
171
+ # same for the averaged version of the losses.
172
+ for l in losses + [total_loss]:
173
+ # Name each loss as '(raw)' and name the moving average version of the loss
174
+ # as the original loss name.
175
+ tf.summary.scalar(l.op.name +' (raw)', l)
176
+ tf.summary.scalar(l.op.name, loss_averages.average(l))
177
+
178
+ return loss_averages_op
179
+
180
+ def train(total_loss, global_step, optimizer, learning_rate, moving_average_decay, update_gradient_vars, log_histograms=True):
181
+ # Generate moving averages of all losses and associated summaries.
182
+ loss_averages_op = _add_loss_summaries(total_loss)
183
+
184
+ # Compute gradients.
185
+ with tf.control_dependencies([loss_averages_op]):
186
+ if optimizer=='ADAGRAD':
187
+ opt = tf.train.AdagradOptimizer(learning_rate)
188
+ elif optimizer=='ADADELTA':
189
+ opt = tf.train.AdadeltaOptimizer(learning_rate, rho=0.9, epsilon=1e-6)
190
+ elif optimizer=='ADAM':
191
+ opt = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=0.1)
192
+ elif optimizer=='RMSPROP':
193
+ opt = tf.train.RMSPropOptimizer(learning_rate, decay=0.9, momentum=0.9, epsilon=1.0)
194
+ elif optimizer=='MOM':
195
+ opt = tf.train.MomentumOptimizer(learning_rate, 0.9, use_nesterov=True)
196
+ else:
197
+ raise ValueError('Invalid optimization algorithm')
198
+
199
+ grads = opt.compute_gradients(total_loss, update_gradient_vars)
200
+
201
+ # Apply gradients.
202
+ apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
203
+
204
+ # Add histograms for trainable variables.
205
+ if log_histograms:
206
+ for var in tf.trainable_variables():
207
+ tf.summary.histogram(var.op.name, var)
208
+
209
+ # Add histograms for gradients.
210
+ if log_histograms:
211
+ for grad, var in grads:
212
+ if grad is not None:
213
+ tf.summary.histogram(var.op.name + '/gradients', grad)
214
+
215
+ # Track the moving averages of all trainable variables.
216
+ variable_averages = tf.train.ExponentialMovingAverage(
217
+ moving_average_decay, global_step)
218
+ variables_averages_op = variable_averages.apply(tf.trainable_variables())
219
+
220
+ with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
221
+ train_op = tf.no_op(name='train')
222
+
223
+ return train_op
224
+
225
+ def prewhiten(x):
226
+ mean = np.mean(x)
227
+ std = np.std(x)
228
+ std_adj = np.maximum(std, 1.0/np.sqrt(x.size))
229
+ y = np.multiply(np.subtract(x, mean), 1/std_adj)
230
+ return y
231
+
232
+ def crop(image, random_crop, image_size):
+     if image.shape[1] > image_size:
+         sz1 = int(image.shape[1] // 2)
+         sz2 = int(image_size // 2)
+         if random_crop:
+             diff = sz1 - sz2
+             (h, v) = (np.random.randint(-diff, diff + 1), np.random.randint(-diff, diff + 1))
+         else:
+             (h, v) = (0, 0)
+         image = image[(sz1 - sz2 + v):(sz1 + sz2 + v), (sz1 - sz2 + h):(sz1 + sz2 + h), :]
+     return image
+ 
+ def flip(image, random_flip):
+     if random_flip and np.random.choice([True, False]):
+         image = np.fliplr(image)
+     return image
+ 
+ def to_rgb(img):
+     w, h = img.shape
+     ret = np.empty((w, h, 3), dtype=np.uint8)
+     ret[:, :, 0] = ret[:, :, 1] = ret[:, :, 2] = img
+     return ret
+ 
+ def load_data(image_paths, do_random_crop, do_random_flip, image_size, do_prewhiten=True):
+     nrof_samples = len(image_paths)
+     images = np.zeros((nrof_samples, image_size, image_size, 3))
+     for i in range(nrof_samples):
+         img = imageio.imread(image_paths[i])
+         if img.ndim == 2:
+             img = to_rgb(img)
+         if do_prewhiten:
+             img = prewhiten(img)
+         img = crop(img, do_random_crop, image_size)
+         img = flip(img, do_random_flip)
+         images[i, :, :, :] = img
+     return images
+ 
+ def get_label_batch(label_data, batch_size, batch_index):
+     nrof_examples = np.size(label_data, 0)
+     j = batch_index * batch_size % nrof_examples
+     if j + batch_size <= nrof_examples:
+         batch = label_data[j:j + batch_size]
+     else:
+         # Wrap around so the batch always contains batch_size labels.
+         x1 = label_data[j:nrof_examples]
+         x2 = label_data[0:batch_size - (nrof_examples - j)]
+         batch = np.concatenate([x1, x2])
+     batch_int = batch.astype(np.int64)
+     return batch_int
+ 
+ def get_batch(image_data, batch_size, batch_index):
+     nrof_examples = np.size(image_data, 0)
+     j = batch_index * batch_size % nrof_examples
+     if j + batch_size <= nrof_examples:
+         batch = image_data[j:j + batch_size, :, :, :]
+     else:
+         # Wrap around so the batch always contains batch_size images.
+         x1 = image_data[j:nrof_examples, :, :, :]
+         x2 = image_data[0:batch_size - (nrof_examples - j), :, :, :]
+         batch = np.vstack([x1, x2])
+     batch_float = batch.astype(np.float32)
+     return batch_float
+ 
+ def get_triplet_batch(triplets, batch_index, batch_size):
+     ax, px, nx = triplets
+     a = get_batch(ax, int(batch_size / 3), batch_index)
+     p = get_batch(px, int(batch_size / 3), batch_index)
+     n = get_batch(nx, int(batch_size / 3), batch_index)
+     batch = np.vstack([a, p, n])
+     return batch
+ 
+ def get_learning_rate_from_file(filename, epoch):
+     learning_rate = None
+     with open(filename, 'r') as f:
+         for line in f.readlines():
+             line = line.split('#', 1)[0]
+             if line:
+                 par = line.strip().split(':')
+                 e = int(par[0])
+                 lr = float(par[1])
+                 if e <= epoch:
+                     learning_rate = lr
+                 else:
+                     return learning_rate
+     # Also return a value when the requested epoch is beyond the last entry.
+     return learning_rate
+ 
+ class ImageClass():
+     "Stores the paths to images for a given class"
+     def __init__(self, name, image_paths):
+         self.name = name
+         self.image_paths = image_paths
+ 
+     def __str__(self):
+         return self.name + ', ' + str(len(self.image_paths)) + ' images'
+ 
+     def __len__(self):
+         return len(self.image_paths)
+ 
+ def get_dataset(paths, has_class_directories=True):
+     dataset = []
+     for path in paths.split(':'):
+         path_exp = os.path.expanduser(path)
+         classes = os.listdir(path_exp)
+         classes.sort()
+         nrof_classes = len(classes)
+         for i in range(nrof_classes):
+             class_name = classes[i]
+             facedir = os.path.join(path_exp, class_name)
+             image_paths = get_image_paths(facedir)
+             dataset.append(ImageClass(class_name, image_paths))
+ 
+     return dataset
+ 
+ def get_image_paths(facedir):
+     image_paths = []
+     if os.path.isdir(facedir):
+         images = os.listdir(facedir)
+         image_paths = [os.path.join(facedir, img) for img in images]
+     return image_paths
+ 
+ def split_dataset(dataset, split_ratio, mode):
+     if mode == 'SPLIT_CLASSES':
+         nrof_classes = len(dataset)
+         class_indices = np.arange(nrof_classes)
+         np.random.shuffle(class_indices)
+         split = int(round(nrof_classes * split_ratio))
+         train_set = [dataset[i] for i in class_indices[0:split]]
+         # Slice with [split:] so the last class is not silently dropped.
+         test_set = [dataset[i] for i in class_indices[split:]]
+     elif mode == 'SPLIT_IMAGES':
+         train_set = []
+         test_set = []
+         min_nrof_images = 2
+         for cls in dataset:
+             paths = cls.image_paths
+             np.random.shuffle(paths)
+             split = int(round(len(paths) * split_ratio))
+             if split < min_nrof_images:
+                 continue  # Not enough images for test set. Skip class...
+             train_set.append(ImageClass(cls.name, paths[0:split]))
+             # Slice with [split:] so the last image is not silently dropped.
+             test_set.append(ImageClass(cls.name, paths[split:]))
+     else:
+         raise ValueError('Invalid train/test split mode "%s"' % mode)
+     return train_set, test_set
+ 
+ def load_model(model):
+     # Check if the model is a model directory (containing a metagraph and a checkpoint file)
+     # or if it is a protobuf file with a frozen graph
+     model_exp = os.path.expanduser(model)
+     if (os.path.isfile(model_exp)):
+         print('Model filename: %s' % model_exp)
+         with gfile.FastGFile(model_exp, 'rb') as f:
+             graph_def = tf.GraphDef()
+             graph_def.ParseFromString(f.read())
+             tf.import_graph_def(graph_def, name='')
+     else:
+         print('Model directory: %s' % model_exp)
+         meta_file, ckpt_file = get_model_filenames(model_exp)
+ 
+         print('Metagraph file: %s' % meta_file)
+         print('Checkpoint file: %s' % ckpt_file)
+ 
+         saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))
+         saver.restore(tf.get_default_session(), os.path.join(model_exp, ckpt_file))
+ 
+ def get_model_filenames(model_dir):
+     files = os.listdir(model_dir)
+     meta_files = [s for s in files if s.endswith('.meta')]
+     if len(meta_files) == 0:
+         raise ValueError('No meta file found in the model directory (%s)' % model_dir)
+     elif len(meta_files) > 1:
+         raise ValueError('There should not be more than one meta file in the model directory (%s)' % model_dir)
+     meta_file = meta_files[0]
+     max_step = -1
+     for f in files:
+         # The dot is escaped so it matches a literal '.' in the checkpoint name.
+         step_str = re.match(r'(^model-[\w\- ]+\.ckpt-(\d+))', f)
+         if step_str is not None and len(step_str.groups()) >= 2:
+             step = int(step_str.groups()[1])
+             if step > max_step:
+                 max_step = step
+                 ckpt_file = step_str.groups()[0]
+     return meta_file, ckpt_file
+ 
+ def calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10):
+     assert(embeddings1.shape[0] == embeddings2.shape[0])
+     assert(embeddings1.shape[1] == embeddings2.shape[1])
+     nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
+     nrof_thresholds = len(thresholds)
+     k_fold = KFold(n_splits=nrof_folds, shuffle=False)
+ 
+     tprs = np.zeros((nrof_folds, nrof_thresholds))
+     fprs = np.zeros((nrof_folds, nrof_thresholds))
+     accuracy = np.zeros((nrof_folds))
+ 
+     diff = np.subtract(embeddings1, embeddings2)
+     dist = np.sum(np.square(diff), 1)
+     indices = np.arange(nrof_pairs)
+ 
+     for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
+ 
+         # Find the best threshold for the fold
+         acc_train = np.zeros((nrof_thresholds))
+         for threshold_idx, threshold in enumerate(thresholds):
+             _, _, acc_train[threshold_idx] = calculate_accuracy(threshold, dist[train_set], actual_issame[train_set])
+         best_threshold_index = np.argmax(acc_train)
+         for threshold_idx, threshold in enumerate(thresholds):
+             tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _ = calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])
+         _, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], dist[test_set], actual_issame[test_set])
+ 
+     tpr = np.mean(tprs, 0)
+     fpr = np.mean(fprs, 0)
+     return tpr, fpr, accuracy
+ 
+ def calculate_accuracy(threshold, dist, actual_issame):
+     predict_issame = np.less(dist, threshold)
+     tp = np.sum(np.logical_and(predict_issame, actual_issame))
+     fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
+     tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))
+     fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
+ 
+     tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
+     fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
+     acc = float(tp + tn) / dist.size
+     return tpr, fpr, acc
+ 
+ def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):
+     assert(embeddings1.shape[0] == embeddings2.shape[0])
+     assert(embeddings1.shape[1] == embeddings2.shape[1])
+     nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
+     nrof_thresholds = len(thresholds)
+     k_fold = KFold(n_splits=nrof_folds, shuffle=False)
+ 
+     val = np.zeros(nrof_folds)
+     far = np.zeros(nrof_folds)
+ 
+     diff = np.subtract(embeddings1, embeddings2)
+     dist = np.sum(np.square(diff), 1)
+     indices = np.arange(nrof_pairs)
+ 
+     for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
+ 
+         # Find the threshold that gives FAR = far_target
+         far_train = np.zeros(nrof_thresholds)
+         for threshold_idx, threshold in enumerate(thresholds):
+             _, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
+         if np.max(far_train) >= far_target:
+             f = interpolate.interp1d(far_train, thresholds, kind='slinear')
+             threshold = f(far_target)
+         else:
+             threshold = 0.0
+ 
+         val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])
+ 
+     val_mean = np.mean(val)
+     far_mean = np.mean(far)
+     val_std = np.std(val)
+     return val_mean, val_std, far_mean
+ 
+ def calculate_val_far(threshold, dist, actual_issame):
+     predict_issame = np.less(dist, threshold)
+     true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
+     false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
+     n_same = np.sum(actual_issame)
+     n_diff = np.sum(np.logical_not(actual_issame))
+     val = float(true_accept) / float(n_same)
+     far = float(false_accept) / float(n_diff)
+     return val, far
+ 
+ def store_revision_info(src_path, output_dir, arg_string):
+ 
+     # Get git hash
+     gitproc = Popen(['git', 'rev-parse', 'HEAD'], stdout=PIPE, cwd=src_path)
+     (stdout, _) = gitproc.communicate()
+     git_hash = stdout.strip().decode('utf-8')  # Popen returns bytes under Python 3
+ 
+     # Get local changes
+     gitproc = Popen(['git', 'diff', 'HEAD'], stdout=PIPE, cwd=src_path)
+     (stdout, _) = gitproc.communicate()
+     git_diff = stdout.strip().decode('utf-8')
+ 
+     # Store a text file in the log directory
+     rev_info_filename = os.path.join(output_dir, 'revision_info.txt')
+     with open(rev_info_filename, "w") as text_file:
+         text_file.write('arguments: %s\n--------------------\n' % arg_string)
+         text_file.write('git hash: %s\n--------------------\n' % git_hash)
+         text_file.write('%s' % git_diff)
+ 
+ def list_variables(filename):
+     reader = training.NewCheckpointReader(filename)
+     variable_map = reader.get_variable_to_shape_map()
+     names = sorted(variable_map.keys())
+     return names
+ 
+ def put_images_on_grid(images, shape=(16, 8)):
+     nrof_images = images.shape[0]
+     img_size = images.shape[1]
+     bw = 3
+     img = np.zeros((shape[1] * (img_size + bw) + bw, shape[0] * (img_size + bw) + bw, 3), np.float32)
+     for i in range(shape[1]):
+         x_start = i * (img_size + bw) + bw
+         for j in range(shape[0]):
+             img_index = i * shape[0] + j
+             if img_index >= nrof_images:
+                 break
+             y_start = j * (img_size + bw) + bw
+             img[x_start:x_start + img_size, y_start:y_start + img_size, :] = images[img_index, :, :, :]
+         if img_index >= nrof_images:
+             break
+     return img
+ 
+ def write_arguments_to_file(args, filename):
+     with open(filename, 'w') as f:
+         # iteritems() was removed in Python 3; items() works in both worlds.
+         for key, value in vars(args).items():
+             f.write('%s: %s\n' % (key, str(value)))
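For orientation, here is a minimal sketch (not part of this commit) of how the helpers above combine to produce embeddings from the frozen graph shipped under model/. The tensor names "input:0", "embeddings:0", and "phase_train:0" are the ones utils.py below looks up; the image path is hypothetical:

    import tensorflow.compat.v1 as tf
    import facenet

    image_paths = ['aligned_img/Phong/example.png']  # hypothetical aligned image
    with tf.Graph().as_default(), tf.Session() as sess:
        # Import the frozen graph added in this commit.
        facenet.load_model('./model/20180402-114759.pb')
        # Prewhiten and center-crop the inputs down to the network size.
        images = facenet.load_data(image_paths, do_random_crop=False,
                                   do_random_flip=False, image_size=160)
        graph = tf.get_default_graph()
        feed_dict = {graph.get_tensor_by_name('input:0'): images,
                     graph.get_tensor_by_name('phase_train:0'): False}
        emb = sess.run(graph.get_tensor_by_name('embeddings:0'), feed_dict=feed_dict)
        print(emb.shape)  # one 512-d embedding per image for this model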
haarcascade_frontalface_default.xml ADDED
The diff for this file is too large to render. See raw diff
 
insert_new_faces.py ADDED
@@ -0,0 +1,53 @@
+ import cv2
+ import time
+ import os
+ 
+ face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
+ cap = cv2.VideoCapture(0)
+ 
+ folder_name = input("Enter a folder name for the captured images: ")
+ parent_dir = "train_img"
+ folder_path = os.path.join(parent_dir, folder_name)
+ 
+ if not os.path.exists(parent_dir):
+     os.makedirs(parent_dir)
+ 
+ if not os.path.exists(folder_path):
+     os.makedirs(folder_path)
+ 
+ image_count = 0
+ last_capture_time = 0
+ 
+ while True:
+     ret, frame = cap.read()
+ 
+     if not ret:
+         break
+ 
+     frame = cv2.flip(frame, 1)
+     original_frame = frame.copy()
+     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+     faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
+ 
+     for (x, y, w, h) in faces:
+         cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
+ 
+     current_time = time.time()
+     if len(faces) > 0 and (current_time - last_capture_time) >= 1:
+         image_path = os.path.join(folder_path, f"{folder_name}_{image_count}.png")
+         cv2.imwrite(image_path, original_frame)
+         print(f"Image saved as {image_path}")
+         last_capture_time = current_time
+         image_count += 1
+ 
+     if image_count >= 30:
+         print(f"Captured 30 images, stopping. Images saved at: {folder_path}")
+         break
+ 
+     cv2.imshow('Webcam', frame)
+ 
+     if cv2.waitKey(1) & 0xFF == ord('q'):
+         break
+ 
+ cap.release()
+ cv2.destroyAllWindows()
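Running this script yields one subdirectory per person, which is exactly the one-directory-per-class layout that facenet.get_dataset() above expects:

    train_img/
        <folder_name>/
            <folder_name>_0.png
            ...
            <folder_name>_29.png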
model/20180402-114759.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf2c12f31880aaa865fa5a9c168dcbd619f7a40b1633f6446d416fac2421ab99
+ size 95745767
npy/det1.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3285cf7a3de2651c5784cb9e32013f5919aae95fd1ed1bc371dd9691affb39af
+ size 27368
npy/det2.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:716b8b83e42476791c9096f14dbb09fefc88bf5c7ec876b1683f9acd52b3f39c
+ size 401681
npy/det3.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:396ead803d85d3443307ff8f45fb6aed2536579b415a4f4d4cb8f93ea6b1476a
+ size 1557360
preprocess.py ADDED
@@ -0,0 +1,108 @@
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+ from scipy import misc
+ import os
+ import tensorflow.compat.v1 as tf
+ 
+ import numpy as np
+ import facenet
+ import detect_face
+ import imageio
+ from PIL import Image
+ 
+ class preprocesses:
+     def __init__(self, input_datadir, output_datadir):
+         self.input_datadir = input_datadir
+         self.output_datadir = output_datadir
+ 
+     def collect_data(self):
+         output_dir = os.path.expanduser(self.output_datadir)
+         if not os.path.exists(output_dir):
+             os.makedirs(output_dir)
+ 
+         dataset = facenet.get_dataset(self.input_datadir)
+         with tf.Graph().as_default():
+             gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
+             sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
+             with sess.as_default():
+                 pnet, rnet, onet = detect_face.create_mtcnn(sess, './npy')
+ 
+         minsize = 20  # minimum size of face
+         threshold = [0.5, 0.6, 0.6]  # thresholds for the three MTCNN stages
+         factor = 0.709  # scale factor
+         margin = 44
+         image_size = 182
+ 
+         random_key = np.random.randint(0, high=99999)
+         bounding_boxes_filename = os.path.join(output_dir, 'bounding_boxes_%05d.txt' % random_key)
+ 
+         with open(bounding_boxes_filename, "w") as text_file:
+             nrof_images_total = 0
+             nrof_successfully_aligned = 0
+             for cls in dataset:
+                 output_class_dir = os.path.join(output_dir, cls.name)
+                 if not os.path.exists(output_class_dir):
+                     os.makedirs(output_class_dir)
+                 for image_path in cls.image_paths:
+                     nrof_images_total += 1
+                     filename = os.path.splitext(os.path.split(image_path)[1])[0]
+                     output_filename = os.path.join(output_class_dir, filename + '.png')
+                     print("Image: %s" % image_path)
+                     if not os.path.exists(output_filename):
+                         try:
+                             img = imageio.imread(image_path)
+                         except (IOError, ValueError, IndexError) as e:
+                             errorMessage = '{}: {}'.format(image_path, e)
+                             print(errorMessage)
+                         else:
+                             if img.ndim < 2:
+                                 print('Unable to align "%s"' % image_path)
+                                 text_file.write('%s\n' % (output_filename))
+                                 continue
+                             if img.ndim == 2:
+                                 img = facenet.to_rgb(img)
+                                 print('to_rgb data dimension: ', img.ndim)
+                             img = img[:, :, 0:3]
+ 
+                             bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold,
+                                                                         factor)
+                             nrof_faces = bounding_boxes.shape[0]
+                             print('No of Detected Face: %d' % nrof_faces)
+                             if nrof_faces > 0:
+                                 det = bounding_boxes[:, 0:4]
+                                 img_size = np.asarray(img.shape)[0:2]
+                                 if nrof_faces > 1:
+                                     bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
+                                     img_center = img_size / 2
+                                     offsets = np.vstack([(det[:, 0] + det[:, 2]) / 2 - img_center[1],
+                                                          (det[:, 1] + det[:, 3]) / 2 - img_center[0]])
+                                     offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
+                                     index = np.argmax(
+                                         bounding_box_size - offset_dist_squared * 2.0)  # some extra weight on the centering
+                                     det = det[index, :]
+                                 det = np.squeeze(det)
+                                 bb_temp = np.zeros(4, dtype=np.int32)
+ 
+                                 # Ensure bounding box is within image boundaries
+                                 bb_temp[0] = np.maximum(det[0], 0)
+                                 bb_temp[1] = np.maximum(det[1], 0)
+                                 bb_temp[2] = np.minimum(det[2], img_size[1])
+                                 bb_temp[3] = np.minimum(det[3], img_size[0])
+ 
+                                 cropped_temp = img[bb_temp[1]:bb_temp[3], bb_temp[0]:bb_temp[2], :]
+ 
+                                 # Check if the cropped region has a valid size before resizing
+                                 if cropped_temp.shape[0] > 0 and cropped_temp.shape[1] > 0:
+                                     scaled_temp = np.array(Image.fromarray(cropped_temp).resize((image_size, image_size)))
+                                     nrof_successfully_aligned += 1
+                                     imageio.imwrite(output_filename, scaled_temp)
+                                     text_file.write('%s %d %d %d %d\n' % (output_filename, bb_temp[0], bb_temp[1], bb_temp[2], bb_temp[3]))
+                                 else:
+                                     print(f"Skipped resizing for image {image_path} due to invalid crop size")
+                                     text_file.write('%s\n' % (output_filename))
+                             else:
+                                 print('Unable to align "%s"' % image_path)
+                                 text_file.write('%s\n' % (output_filename))
+ 
+         return (nrof_images_total, nrof_successfully_aligned)
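A hypothetical driver sketch for this class (not part of this file), using the train_img/ and aligned_img/ directories referenced elsewhere in the commit:

    from preprocess import preprocesses

    input_datadir = './train_img'      # raw captures from insert_new_faces.py
    output_datadir = './aligned_img'   # aligned crops consumed by train_main.py
    nrof_total, nrof_aligned = preprocesses(input_datadir, output_datadir).collect_data()
    print('Total images: %d, successfully aligned: %d' % (nrof_total, nrof_aligned))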
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ tensorflow==2.10.0
+ pandas
+ numpy
+ opencv-python
+ scikit-learn
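Note that code elsewhere in this commit also imports imageio, PIL (Pillow), scipy, and yaml (PyYAML), which are not listed above; installing them alongside the pinned requirements is likely needed:

    pip install -r requirements.txt imageio Pillow scipy PyYAML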
train_img/Kien.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5468dee12d066674187d498be0c31a60c114716e97688a888e07811ddd03cfb0
+ size 37586440
train_img/Phong.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca2e3ccef34eaeed17aa8105008ccb7ec295d0a78660404f9ee04eb8028a826e
+ size 24837843
train_main.py ADDED
@@ -0,0 +1,17 @@
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+ import sys
+ from classifier import training
+ import os
+ 
+ datadir = './aligned_img'
+ modeldir = './model/20180402-114759.pb'
+ #modeldir = './model/20170511-185253.pb'
+ classifier_filename = './class/classifier.pkl'
+ print("Training Start")
+ obj = training(datadir, modeldir, classifier_filename)
+ get_file = obj.main_train()
+ print('Saved classifier model to file "%s"' % get_file)
+ 
+ sys.exit("All Done")
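Once training completes, the pickled classifier can be applied to new embeddings. A sketch under the assumption that classifier.pkl stores a (model, class_names) pair (utils.py below unpickles it that way) and that the model exposes predict_proba, as scikit-learn classifiers trained with probability estimates do:

    import pickle

    with open('./class/classifier.pkl', 'rb') as f:
        model, class_names = pickle.load(f)

    # emb: (n, 512) array of FaceNet embeddings, e.g. from the facenet.py sketch above
    probabilities = model.predict_proba(emb)
    best = probabilities.argmax(axis=1)
    print([class_names[i] for i in best])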
utils.py ADDED
@@ -0,0 +1,66 @@
+ import os
+ import numpy as np
+ import pickle as pkl
+ import yaml
+ from collections import defaultdict
+ import tensorflow.compat.v1 as tf
+ import facenet
+ import detect_face
+ 
+ infomation = defaultdict(dict)
+ cfg = yaml.load(open('config.yaml', 'r'), Loader=yaml.FullLoader)
+ MODEL_DIR = cfg['PATH']['MODEL_DIR']
+ CLASSIFIER_DIR = cfg['PATH']['CLASSIFIER_DIR']
+ NPY_DIR = cfg['PATH']['NPY_DIR']
+ TRAIN_IMG_DIR = cfg['PATH']['TRAIN_IMG_DIR']
+ 
+ def get_elements_dir(x):
+     path = x
+     return path
+ 
+ def load_essentail_components():
+     model_dir = get_elements_dir(MODEL_DIR)
+     classifier_filename = get_elements_dir(CLASSIFIER_DIR)
+     npy = get_elements_dir(NPY_DIR)
+     train_img = get_elements_dir(TRAIN_IMG_DIR)
+     return model_dir, classifier_filename, npy, train_img
+ 
+ def gpu_session():
+     gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
+     sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
+     return sess
+ 
+ def configure_mtcnn(sess, npy, train_img):
+     pnet, rnet, onet = detect_face.create_mtcnn(sess, npy)
+     minsize = 30  # minimum size of face
+     threshold = [0.7, 0.8, 0.8]  # thresholds for the three MTCNN stages
+     factor = 0.709  # scale factor
+     margin = 44
+     batch_size = 100  # 1000
+     image_size = 182
+     input_image_size = 160
+     HumanNames = sorted(os.listdir(train_img))
+     return pnet, rnet, onet, minsize, threshold, factor, HumanNames  # hand the values back to the caller
+ 
+ def recognize(image):
+     model_dir, classifier_filename, npy, train_img = load_essentail_components()
+ 
+     with tf.Graph().as_default():
+         sess = gpu_session()
+         with sess.as_default():
+             pnet, rnet, onet, minsize, threshold, factor, HumanNames = configure_mtcnn(sess, npy, train_img)
+             print('Loading Model ...')
+             facenet.load_model(model=model_dir)
+             images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
+             embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
+             phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
+             embedding_size = embeddings.get_shape()[1]
+             classifier_filename_exp = os.path.expanduser(classifier_filename)
+             with open(classifier_filename_exp, 'rb') as infile:
+                 (model, class_names) = pkl.load(infile, encoding='latin1')
+ 
+             if image.ndim == 2:
+                 image = facenet.to_rgb(image)
+             bounding_boxes, _ = detect_face.detect_face(image, minsize, pnet, rnet, onet, threshold, factor)
+             faceNum = bounding_boxes.shape[0]
+ 