from typing import Tuple

import cv2
import gradio as gr
import numpy as np
from mediapipe.python.solutions import drawing_utils
from mediapipe.python.solutions.face_detection import FaceDetection

from blurriness_estimation import BlurrinessEstimator
from closed_eyes_detection import ClosedEyesDetector
from closed_eyes_detection.eyes_extractor import EyesExtractor
from config import InferenceConfig, BlurrinessConfig

fd = FaceDetection()

def draw_face_detection(img):
    """
    Draws MediaPipe's face detection result on the input image.

    Args:
        img: input image, processed in place.
    """
    results = fd.process(img)
    if results.detections:
        for detection in results.detections:
            drawing_utils.draw_detection(img, detection)
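
# Usage sketch ("face.jpg" is a hypothetical file name used only for
# illustration): cv2.imread returns BGR while MediaPipe expects RGB, so
# convert before calling the helper.
#
#   frame = cv2.cvtColor(cv2.imread("face.jpg"), cv2.COLOR_BGR2RGB)
#   draw_face_detection(frame)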


def detect_closed_eyes(img: np.ndarray) -> Tuple[np.ndarray, str]:
    """
    Runs the Closed Eyes Detector.

    Args:
        img: input image.

    Returns:
        The extracted eye crops (or the unchanged input if no face was found)
        and a string stating whether the eyes are open or closed.
    """
    if img is None:
        raise ValueError("No image provided")
    detector = ClosedEyesDetector(InferenceConfig())
    extractor = EyesExtractor()
    is_closed = detector(img)
    if is_closed is not None:
        # extract_eyes returns a batch of two eye crops: shape (2, h, w, c).
        img = extractor.extract_eyes(img)
        b, h, w, c = img.shape
        # White separator between the two crops; uint8 is required here,
        # since int8 would wrap 255 around to -1.
        vertical_separator = np.full((h, 5, c), 255, dtype=np.uint8)
        img = np.hstack((img[1], vertical_separator, img[0]))
        s = "Eyes are closed." if is_closed else "Eyes are open."
    else:
        s = "No face detected."
    return img, s
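
# Standalone usage sketch, assuming a single BGR frame grabbed outside Gradio:
#
#   ok, bgr = cv2.VideoCapture(0).read()
#   eyes_img, verdict = detect_closed_eyes(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))
#   print(verdict)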


def detect_blur(img: np.ndarray) -> Tuple[np.ndarray, str]:
    """
    Runs the Blurriness Detector.

    Args:
        img: input image.

    Returns:
        The Laplacian-filtered image and a string stating whether the input
        image is blurry.
    """
    if img is None:
        raise ValueError("No image provided")
    cfg = BlurrinessConfig()
    be = BlurrinessEstimator(cfg)
    # Absolute Laplacian response for visualization; clip to the displayable
    # uint8 range (int8 would overflow on strong edges).
    laplacian = np.absolute(cv2.Laplacian(img, cv2.CV_64F))
    laplacian = np.clip(laplacian, 0, 255).astype(np.uint8)
    is_blurry, score = be(img)
    s = f"Sharpness threshold: {cfg.threshold}"
    s += f"\nScore: {score:.0f}\n"
    s += "Image is blurry (score < threshold)." if is_blurry else "Image is sharp (score >= threshold)."
    return laplacian, s
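
# For reference, a common sharpness score is the variance of the Laplacian on
# the grayscale image; BlurrinessEstimator presumably computes something
# similar and compares it to cfg.threshold:
#
#   gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
#   score = cv2.Laplacian(gray, cv2.CV_64F).var()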


demo = gr.Blocks()

with demo:
    gr.Markdown("Detect closed eyes or estimate blurriness.")
    with gr.Tabs():
        with gr.TabItem("Detect closed eyes"):
            with gr.Row():
                with gr.Column():
                    eye_cam_input = gr.Image(sources="webcam", streaming=True)
                    eyes_button = gr.Button("Detect")
                with gr.Column():
                    detection_output = gr.Image(label="Detection result (sorry for the creepiness)")
                    detection_output_text = gr.Textbox(label="Result")
        with gr.TabItem("Estimate blur"):
            with gr.Row():
                with gr.Column():
                    blur_cam_input = gr.Image(sources="webcam", streaming=True)
                    blur_button = gr.Button("Estimate blur")
                with gr.Column():
                    laplacian_output = gr.Image(label="Laplacian filter view")
                    blurriness_estimation_text = gr.Textbox(label="Result")

    eyes_button.click(detect_closed_eyes, inputs=[eye_cam_input], outputs=[detection_output, detection_output_text])
    blur_button.click(detect_blur, inputs=[blur_cam_input], outputs=[laplacian_output, blurriness_estimation_text])


if __name__ == "__main__":
    demo.launch()