import gradio as gr
import os
import requests
from PIL import Image
def face_compare(frame1, frame2):
    """Compare the faces found in two images via the MiniAI face-compare API.

    Args:
        frame1: Filesystem path to the first image.
        frame2: Filesystem path to the second image.

    Returns:
        ``[faces, html]`` where ``faces`` is a PIL image showing the cropped
        faces side by side (``None`` if no face could be cropped) and ``html``
        is an HTML table with the comparison verdict and similarity score.
    """
    url = "https://faceapi.miniai.live/face_compare"
    # Use context managers so the uploaded file handles are closed
    # deterministically (the original leaked both handles).
    with open(frame1, 'rb') as f1, open(frame2, 'rb') as f2:
        r = requests.post(url=url, files={'file1': f1, 'file2': f2})
    # Parse the response body once instead of re-parsing it on every access.
    data = r.json()

    compare_result = data.get('compare_result')
    compare_similarity = data.get('compare_similarity')
    # NOTE(review): the table markup was garbled in the original source;
    # reconstructed as a plain two-column HTML table — confirm against the
    # intended styling.
    html = (
        "<table>"
        "<tr><th>State</th><th>Value</th></tr>"
        f"<tr><td>Is same person?</td><td>{compare_result}</td></tr>"
        f"<tr><td>Similarity</td><td>{compare_similarity}</td></tr>"
        "</table>"
    )

    def _crop_face(image, box):
        """Crop `box` (clamped to the image bounds) and scale to 150 px tall."""
        x1 = max(box.get('x1'), 0)
        y1 = max(box.get('y1'), 0)
        x2 = min(box.get('x2'), image.width - 1)
        y2 = min(box.get('y2'), image.height - 1)
        face = image.crop((x1, y1, x2, y2))
        ratio = face.width / float(face.height)
        return face.resize((int(ratio * 150), 150))

    faces = None
    try:
        image1 = Image.open(frame1)
        image2 = Image.open(frame2)
        face1 = _crop_face(image1, data['face1']) if data.get('face1') is not None else None
        face2 = _crop_face(image2, data['face2']) if data.get('face2') is not None else None
        # Compose the crops onto a single grey canvas, 10 px apart.
        if face1 is not None and face2 is not None:
            canvas = Image.new('RGB', (face1.width + face2.width + 10, 150), (80, 80, 80))
            canvas.paste(face1, (0, 0))
            canvas.paste(face2, (face1.width + 10, 0))
            faces = canvas.copy()
        elif face1 is not None:
            canvas = Image.new('RGB', (face1.width + face1.width + 10, 150), (80, 80, 80))
            canvas.paste(face1, (0, 0))
            faces = canvas.copy()
        elif face2 is not None:
            canvas = Image.new('RGB', (face2.width + face2.width + 10, 150), (80, 80, 80))
            canvas.paste(face2, (face2.width + 10, 0))
            faces = canvas.copy()
    except Exception:
        # Best-effort preview: any cropping failure falls back to no image
        # while still returning the textual comparison result.
        pass
    return [faces, html]
def check_liveness(frame):
    """Run liveness / age / gender detection on one image via the MiniAI API.

    Args:
        frame: Filesystem path to the image to check.

    Returns:
        ``[faces, live_result]`` where ``faces`` is a PIL image with every
        detected face cropped and tiled horizontally (``None`` on failure)
        and ``live_result`` is an HTML table of per-face results.
    """
    url = "https://faceapi.miniai.live/face_liveness_check"
    # Context manager closes the uploaded file (the original leaked it).
    with open(frame, 'rb') as f:
        r = requests.post(url=url, files={'file': f})
    # Parse the response once; the original re-parsed it several times and
    # also extracted a `faceCount` it never used — dropped here.
    data = r.json()

    # NOTE(review): the table markup was garbled in the original source;
    # reconstructed as a plain HTML table — confirm against intended styling.
    rows = ["<table><tr><th>FaceID</th><th>Age</th><th>Gender</th><th>Liveness</th></tr>"]
    for item in data.get('face_state', []):
        if item.get('FaceID'):
            rows.append(
                f"<tr><td>{item.get('FaceID')}</td><td>{item.get('Age')}</td>"
                f"<td>{item.get('Gender')}</td><td>{item.get('LivenessCheck')}</td></tr>"
            )
    rows.append("</table>")
    live_result = ''.join(rows)

    faces = None
    try:
        image = Image.open(frame)
        for face in data.get('faces'):
            # Clamp the reported box to the image bounds before cropping.
            x1 = max(face.get('x1'), 0)
            y1 = max(face.get('y1'), 0)
            x2 = min(face.get('x2'), image.width - 1)
            y2 = min(face.get('y2'), image.height - 1)
            face_image = image.crop((x1, y1, x2, y2))
            ratio = face_image.width / float(face_image.height)
            face_image = face_image.resize((int(ratio * 150), 150))
            if faces is None:
                faces = face_image
            else:
                # Append the new crop to the right of the strip, 10 px apart.
                canvas = Image.new('RGB', (faces.width + face_image.width + 10, 150), (80, 80, 80))
                canvas.paste(faces, (0, 0))
                canvas.paste(face_image, (faces.width + 10, 0))
                faces = canvas.copy()
    except Exception:
        # Best-effort preview: keep the textual result even if cropping fails.
        pass
    return [faces, live_result]
def face_emotion(frame):
    """Run emotion recognition on one image via the MiniAI API.

    Args:
        frame: Filesystem path to the image to analyse.

    Returns:
        ``[faces, emotion_result]`` where ``faces`` is a PIL image with every
        detected face cropped and tiled horizontally (``None`` on failure)
        and ``emotion_result`` is an HTML table with the emotion result.
    """
    url = "https://faceapi.miniai.live/face_emotion"
    # Context manager closes the uploaded file (the original leaked it).
    with open(frame, 'rb') as f:
        r = requests.post(url=url, files={'file': f})
    # Parse the response body once instead of re-parsing it on every access.
    data = r.json()

    # NOTE(review): the table markup was garbled in the original source;
    # reconstructed as a plain HTML table — confirm against intended styling.
    emotion_result = (
        "<table>"
        f"<tr><td>Emotional Result :</td><td>{data.get('emotion_result')}</td></tr>"
        "</table>"
    )

    faces = None
    try:
        image = Image.open(frame)
        for face in data.get('faces'):
            # Clamp the reported box to the image bounds before cropping.
            x1 = max(face.get('x1'), 0)
            y1 = max(face.get('y1'), 0)
            x2 = min(face.get('x2'), image.width - 1)
            y2 = min(face.get('y2'), image.height - 1)
            face_image = image.crop((x1, y1, x2, y2))
            ratio = face_image.width / float(face_image.height)
            face_image = face_image.resize((int(ratio * 150), 150))
            if faces is None:
                faces = face_image
            else:
                # Append the new crop to the right of the strip, 10 px apart.
                canvas = Image.new('RGB', (faces.width + face_image.width + 10, 150), (80, 80, 80))
                canvas.paste(faces, (0, 0))
                canvas.paste(face_image, (faces.width + 10, 0))
                faces = canvas.copy()
    except Exception:
        # Best-effort preview: keep the textual result even if cropping fails.
        pass
    return [faces, emotion_result]
# APP Interface
# Three-tab Gradio UI: face comparison, liveness detection, emotion
# recognition. Each tab wires an example gallery + button to one of the
# API-backed handlers above. (Indentation reconstructed — the original
# source had lost all block structure.)
example_dir = os.path.dirname(__file__)  # hoisted: resolved once, not per example

with gr.Blocks() as MiniAIdemo:
    # NOTE(review): the Markdown heading markup was garbled in the original
    # source; text preserved, heading level reconstructed — confirm.
    gr.Markdown(
        """
        # FaceSDK Web Online Demo
        Experience our NIST FRVT Top Ranked FaceRecognition, iBeta 2 Certified Face Liveness Detection Engine
        """
    )
    with gr.Tabs():
        with gr.Tab("Face Recognition"):
            with gr.Row():
                with gr.Column():
                    im_match_in1 = gr.Image(type='filepath', height=300)
                    gr.Examples(
                        [
                            os.path.join(example_dir, "images/compare/demo-pic22.jpg"),
                            os.path.join(example_dir, "images/compare/demo-pic60.jpg"),
                            os.path.join(example_dir, "images/compare/demo-pic35.jpg"),
                            os.path.join(example_dir, "images/compare/demo-pic33.jpg"),
                            os.path.join(example_dir, "images/compare/demo-pic34.jpg"),
                        ],
                        inputs=im_match_in1
                    )
                with gr.Column():
                    im_match_in2 = gr.Image(type='filepath', height=300)
                    gr.Examples(
                        [
                            os.path.join(example_dir, "images/compare/demo-pic41.jpg"),
                            os.path.join(example_dir, "images/compare/demo-pic32.jpg"),
                            os.path.join(example_dir, "images/compare/demo-pic39.jpg"),
                            os.path.join(example_dir, "images/compare/demo-pic61.jpg"),
                            os.path.join(example_dir, "images/compare/demo-pic40.jpg"),
                        ],
                        inputs=im_match_in2
                    )
                with gr.Column():
                    im_match_crop = gr.Image(type="pil", height=256)
                    txt_compare_out = gr.HTML()
            btn_f_match = gr.Button("Check Comparing!", variant='primary')
            btn_f_match.click(face_compare, inputs=[im_match_in1, im_match_in2], outputs=[im_match_crop, txt_compare_out])
        with gr.Tab("Face Liveness Detection"):
            with gr.Row():
                with gr.Column(scale=1):
                    im_liveness_in = gr.Image(type='filepath', height=300)
                    gr.Examples(
                        [
                            os.path.join(example_dir, "images/liveness/f_real_andr.jpg"),
                            os.path.join(example_dir, "images/liveness/f_fake_andr_mask3d.jpg"),
                            os.path.join(example_dir, "images/liveness/f_fake_andr_monitor.jpg"),
                            os.path.join(example_dir, "images/liveness/f_fake_andr_outline.jpg"),
                            os.path.join(example_dir, "images/liveness/f_fake_andr_outline3d.jpg"),
                            os.path.join(example_dir, "images/liveness/1.jpg"),
                            os.path.join(example_dir, "images/liveness/3.png"),
                            os.path.join(example_dir, "images/liveness/4.jpg"),
                        ],
                        inputs=im_liveness_in
                    )
                    btn_f_liveness = gr.Button("Check Liveness!", variant='primary')
                with gr.Blocks():
                    with gr.Row():
                        with gr.Column():
                            # "Croped" typo in the original label fixed.
                            im_liveness_out = gr.Image(label="Cropped Face", type="pil", scale=1)
                        with gr.Column():
                            livness_result_output = gr.HTML()
            btn_f_liveness.click(check_liveness, inputs=im_liveness_in, outputs=[im_liveness_out, livness_result_output])
        with gr.Tab("Face Emotional Recognition"):
            with gr.Row():
                with gr.Column():
                    im_emotion_in = gr.Image(type='filepath', height=300)
                    gr.Examples(
                        [
                            os.path.join(example_dir, "images/emotion/1.jpg"),
                            os.path.join(example_dir, "images/emotion/2.jpg"),
                            os.path.join(example_dir, "images/emotion/3.jpg"),
                            os.path.join(example_dir, "images/emotion/4.jpg"),
                            os.path.join(example_dir, "images/emotion/5.jpg"),
                            os.path.join(example_dir, "images/emotion/6.jpg"),
                        ],
                        inputs=im_emotion_in
                    )
                    btn_f_emotion = gr.Button("Check Emotion!", variant='primary')
                with gr.Blocks():
                    with gr.Row():
                        with gr.Column():
                            im_emotion_out = gr.Image(label="Result Image", type="pil", scale=1)
                        with gr.Column():
                            txt_emotion_out = gr.HTML()
            btn_f_emotion.click(face_emotion, inputs=im_emotion_in, outputs=[im_emotion_out, txt_emotion_out])

if __name__ == "__main__":
    MiniAIdemo.launch()