from deepface import DeepFace
from deepface.detectors import FaceDetector, OpenCvWrapper
from deepface.extendedmodels import Emotion
import cv2
import deepface.commons.distance
import deepface.commons.functions
import numpy
import opennsfw2


class Emotion:
    # Shadows the imported deepface Emotion module; while this class body
    # executes, the name Emotion below still resolves to that module, so its
    # raw labels are reused here.
    labels = [emotion.capitalize() for emotion in Emotion.labels]
    model = DeepFace.build_model('Emotion')


class NSFW:
    labels = [False, True]
    model = opennsfw2.make_open_nsfw_model()

################################################################################

class Pixels(numpy.ndarray):

    @classmethod
    def read(cls, path):
        return cv2.imread(path).view(type=cls)

    def write(self, path):
        cv2.imwrite(path, self)


class FaceImage(Pixels):
    # Instance methods take the image itself as their first argument
    # (face_img plays the role of self).

    def analyze(face_img):
        # The emotion classifier expects a batch of 48x48 grayscale crops.
        face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)
        face_img = cv2.resize(face_img, (48, 48))
        face_img = numpy.expand_dims(face_img, axis=0)
        predictions = Emotion.model.predict(face_img).ravel()
        return Emotion.labels[numpy.argmax(predictions)]

    def represent(face_img):
        # The crop was already detected and aligned in extract_face, so skip
        # detection and return the VGG-Face embedding directly.
        face_img = numpy.expand_dims(face_img, axis=0)
        return DeepFace.represent(face_img,
                                  'VGG-Face',
                                  detector_backend='skip')[0]['embedding']


class Image(Pixels):

    def annotate(img, face, emotion):
        # Draw a quarter-ellipse bracket at each corner of the face box on a
        # single-channel mask.
        face_annotation = numpy.zeros_like(img)
        face_annotation = cv2.cvtColor(face_annotation,
                                       cv2.COLOR_BGR2GRAY).view(type=Pixels)
        x, y, w, h = face
        axes = (int(0.1 * w), int(0.1 * h))
        cv2.ellipse(face_annotation, (x + axes[0], y + axes[1]), axes, 180, 0,
                    90, (1, 0, 0), 2)
        cv2.ellipse(face_annotation, (x + w - axes[0], y + axes[1]), axes, 270,
                    0, 90, (1, 0, 0), 2)
        cv2.ellipse(face_annotation, (x + axes[0], y + h - axes[1]), axes, 90,
                    0, 90, (1, 0, 0), 2)
        cv2.ellipse(face_annotation, (x + w - axes[0], y + h - axes[1]), axes,
                    0, 0, 90, (1, 0, 0), 2)
        # Render the emotion label on a second mask, shrinking the font until
        # the text fits within 60% of the face width.
        emotion_annotation = numpy.zeros_like(img)
        emotion_annotation = cv2.cvtColor(emotion_annotation,
                                          cv2.COLOR_BGR2GRAY).view(type=Pixels)
        for fontScale in numpy.arange(10, 0, -0.1):
            textSize, _ = cv2.getTextSize(emotion, cv2.FONT_HERSHEY_SIMPLEX,
                                          fontScale, 2)
            if textSize[0] <= int(0.6 * w):
                break
        cv2.putText(emotion_annotation, emotion,
                    (int(x + (w - textSize[0]) / 2), int(y + textSize[1] / 2)),
                    cv2.FONT_HERSHEY_SIMPLEX, fontScale, (1, 0, 0), 2)
        return [(face_annotation, 'face'), (emotion_annotation, 'emotion')]

    def detect_faces(img):
        face_detector = FaceDetector.build_model('opencv')
        faces = []
        for _, face, _ in FaceDetector.detect_faces(face_detector, 'opencv',
                                                    img, False):
            face = (int(face[0]), int(face[1]), int(face[2]), int(face[3]))
            faces.append(face)
        return faces

    def extract_face(img, face):
        # Crop the detected box, align it on the eyes, then resize and
        # normalize to the VGG-Face input shape without re-detecting ('skip').
        face_detector = FaceDetector.build_model('opencv')
        x, y, w, h = face
        img = img[y:y + h, x:x + w]
        img = OpenCvWrapper.align_face(face_detector['eye_detector'], img)
        target_size = deepface.commons.functions.find_target_size('VGG-Face')
        face_img, _, _ = deepface.commons.functions.extract_faces(
            img, target_size, 'skip')[0]
        face_img = numpy.squeeze(face_img, axis=0)
        return face_img.view(type=FaceImage)

    def nsfw(img):
        # OpenNSFW2 preprocessing: 224x224 input with VGG-style BGR mean
        # subtraction, done by hand here instead of the library helper.
        img = cv2.resize(img, (224, 224))
        img = img - numpy.array([104, 117, 123], numpy.float32)
        img = numpy.expand_dims(img, axis=0)
        predictions = NSFW.model.predict(img).ravel()
        return NSFW.labels[numpy.argmax(predictions)]

    def pixelate(img):
        # Downsample to a 16x16 mosaic, then scale back up with
        # nearest-neighbour interpolation to keep the blocky look.
        h, w, _ = img.shape
        img = cv2.resize(img, (16, 16))
        return cv2.resize(img, (w, h),
                          interpolation=cv2.INTER_NEAREST).view(type=Pixels)

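# Illustrative usage sketch (not part of the original file; 'photo.jpg' and
# 'pixelated.jpg' are hypothetical paths): read an image, write a pixelated
# copy when it is flagged NSFW, and print each face's predicted emotion.
#
#     img = Image.read('photo.jpg')
#     if img.nsfw():
#         img.pixelate().write('pixelated.jpg')
#     for face in img.detect_faces():
#         print(img.extract_face(face).analyze())
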
################################################################################

class Metadata(dict):
    # Maps each detected face box to its predicted emotion and embedding.

    def __init__(self, img):
        metadata = {}
        for face in img.detect_faces():
            face_img = img.extract_face(face)
            emotion = face_img.analyze()
            representation = face_img.represent()
            metadata[face] = {
                'emotion': emotion,
                'representation': representation
            }
        super(Metadata, self).__init__(metadata)

    def emotions(self):
        return [value['emotion'] for value in self.values()]

    def representations(self):
        return [value['representation'] for value in self.values()]

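# Illustrative usage sketch (not part of the original file; 'photo.jpg' is a
# hypothetical path): building a Metadata runs detection, emotion analysis,
# and embedding once, after which emotions() and representations() are
# plain lookups.
#
#     metadata = Metadata(Image.read('photo.jpg'))
#     print(metadata.emotions())
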
################################################################################

def verify(source_representations, test_representations):
    # Report a match as soon as any source/test embedding pair falls within
    # the VGG-Face cosine-distance threshold.
    for source_representation in source_representations:
        for test_representation in test_representations:
            if deepface.commons.distance.findCosineDistance(
                    source_representation, test_representation
            ) < deepface.commons.distance.findThreshold('VGG-Face', 'cosine'):
                return True
    return False
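

# Minimal end-to-end sketch (not part of the original file; 'a.jpg' and
# 'b.jpg' are hypothetical inputs): verify() returns True when any face in
# the first image matches any face in the second under the cosine threshold.
if __name__ == '__main__':
    source = Metadata(Image.read('a.jpg'))
    test = Metadata(Image.read('b.jpg'))
    print(verify(source.representations(), test.representations()))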