face_keypoint / app.py
itmorn's picture
init
7304409
raw
history blame
1.99 kB
import cv2
import gradio as gr
from z_app_factory import get_app
def inference(image):
    """Detect faces and draw bounding boxes, scores, and keypoints.

    Parameters
    ----------
    image : numpy.ndarray
        RGB image array as supplied by the Gradio Image input.

    Returns
    -------
    numpy.ndarray
        RGB image with detections drawn in place.
    """
    # Gradio hands us RGB; swap to BGR so the colour tuples below follow
    # OpenCV's BGR convention. (COLOR_BGR2RGB and COLOR_RGB2BGR are the
    # same channel swap, so this is equivalent to the original call.)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    lst2d_res = get_app(image)
    thickness = 3
    lineType = 8  # cv2.LINE_8: 8-connected line
    font = cv2.FONT_HERSHEY_SIMPLEX
    for face in lst2d_res:
        bbox = [int(i) for i in face["bbox"]]
        score = face['score']
        # Box colour: green channel scaled by detection confidence (BGR order).
        point_color = (0, int(255 * score), 0)
        x1, y1 = bbox[:2]
        x2, y2 = bbox[2:]
        # Truncated (not rounded) confidence label above the top-left corner.
        cv2.putText(image, str(score)[:4], (x1, y1 - 10), font, 0.8, (0, 255, 0), 2)
        # One rectangle call replaces the original four cv2.line calls that
        # drew the same four edges with the same colour/thickness/lineType.
        cv2.rectangle(image, (x1, y1), (x2, y2), point_color, thickness, lineType)
        # Sparse keypoints (reddish dots).
        for kp in face["kps"]:
            x, y = (int(i) for i in kp)
            cv2.circle(image, (x, y), 2, (2, 30, 200), 2)
        # Dense landmarks (cyan dots). The original enumerated these but never
        # used the index, and materialized an intermediate list — both dropped.
        for kp in face["landmarks"]:
            x, y = (int(i) for i in kp)
            cv2.circle(image, (x, y), 2, (200, 200, 20), 2)
    # Swap back to RGB for Gradio display.
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    return image
# --- Gradio UI definition ---------------------------------------------------
title = "Face Keypoint"
description = "demo for Face Keypoint. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://www.yuque.com/itmorn/ability/face_keypoint' target='_blank'>Project Documents</a> | <a href='https://www.bilibili.com/video/BV1DN4y1P7j3' target='_blank'>Video Demo</a></p>"

# Build the interface with explicit keyword arguments and launch it with
# debug output enabled (blocks until the server is stopped).
demo = gr.Interface(
    fn=inference,
    inputs=[gr.inputs.Image(label="Input")],
    outputs=gr.outputs.Image(type="pil", label="Output"),
    title=title,
    description=description,
    article=article,
    examples=[
        ["imgs/face1.jpg"],
        ["imgs/face2.jpg"],
        ["imgs/cc.png"],
    ],
)
demo.launch(debug=True)