|
from typing import Tuple |
|
|
|
import cv2 |
|
import gradio as gr |
|
import numpy as np |
|
from mediapipe.python.solutions import drawing_utils |
|
from mediapipe.python.solutions.face_detection import FaceDetection |
|
|
|
from blurriness_estimation import BlurrinessEstimator |
|
from closed_eyes_detection import ClosedEyesDetector |
|
from closed_eyes_detection.eyes_extractor import EyesExtractor |
|
from config import InferenceConfig, BlurrinessConfig |
|
|
|
# Shared MediaPipe face detector, built once at import time so every call to
# draw_face_detection reuses the same underlying model.
fd = FaceDetection()
|
|
|
|
|
def draw_face_detection(img):
    """
    Draws MediaPipe's face detection result on the input image.

    Args:
        img: input image, annotated in place (no-op when no face is found).
    """
    detections = fd.process(img).detections or []
    for detection in detections:
        drawing_utils.draw_detection(img, detection)
|
|
|
|
|
def detect_closed_eyes(img: np.ndarray) -> Tuple[np.ndarray, str]:
    """
    Runs the Closed Eyes Detector on a single image.

    Args:
        img: input image (as delivered by the Gradio webcam component).

    Returns:
        Tuple[np.ndarray, str]: a side-by-side crop of both extracted eyes
        separated by a white bar (or the unchanged input image when no face
        is detected), and a message stating whether the eyes are open or
        closed.

    Raises:
        ValueError: if no image is provided.
    """
    if img is None:
        raise ValueError("No image provided")

    # NOTE(review): the detector and extractor are rebuilt on every click;
    # consider hoisting them to module level if model loading is expensive.
    detector = ClosedEyesDetector(InferenceConfig())
    extractor = EyesExtractor()
    is_closed = detector(img)
    if is_closed is not None:
        # extract_eyes returns a batch of eye crops — presumably two
        # (one per eye); TODO confirm against EyesExtractor.
        img = extractor.extract_eyes(img)
        b, h, w, c = img.shape
        # Fix: use uint8 instead of int8 — 255 overflows int8 to -1, so the
        # separator was not rendered as a white bar.
        vertical_separator = np.full((h, 5, c), 255, dtype=np.uint8)
        img = np.hstack((img[1], vertical_separator, img[0]))
        s = "Eyes are closed." if is_closed else "Eyes are open."
    else:
        s = "No face detected"
    return img, s
|
|
|
|
|
def detect_blur(img: np.ndarray) -> Tuple[np.ndarray, str]:
    """
    Runs the Blurriness Detector on a single image.

    Args:
        img: input image (as delivered by the Gradio webcam component).

    Returns:
        Tuple[np.ndarray, str]: a visualization of the absolute Laplacian
        response (uint8), and a message reporting the sharpness score
        against the configured threshold.

    Raises:
        ValueError: if no image is provided.
    """
    if img is None:
        raise ValueError("No image provided")

    cfg = BlurrinessConfig()
    be = BlurrinessEstimator(cfg)
    # Fix: clip to [0, 255] and cast to uint8 — absolute Laplacian values
    # above 127 previously wrapped to negatives under the int8 cast,
    # corrupting the displayed filter view.
    laplacian = np.clip(np.absolute(cv2.Laplacian(img, cv2.CV_64F)), 0, 255).astype(np.uint8)
    is_blurry, score = be(img)
    s = f"Sharpness threshold: {cfg.threshold}"
    s += f"\nScore: {score:.0f}\n"
    s += "Image is blurry (Score < threshold)" if is_blurry else "Image is sharp (score > threshold)."

    return laplacian, s
|
|
|
|
|
# Build the Gradio UI: two tabs (closed-eye detection and blur estimation),
# each with a webcam input column and a result column. The nesting order of
# the context managers defines the on-screen layout.
demo = gr.Blocks()
with demo:
    gr.Markdown("Detect closed eyes or estimate blurriness.")
    with gr.Tabs():
        with gr.TabItem("Detect closed eyes"):
            with gr.Row():
                # Left column: live webcam feed plus the trigger button.
                with gr.Column():
                    eye_cam_input = gr.Image(sources="webcam", streaming=True)
                    eyes_button = gr.Button("Detect")
                # Right column: annotated eye crops and the verdict text.
                with gr.Column():
                    detection_output = gr.Image(label="Detection result (sorry for the creepiness)", )
                    detection_output_text = gr.Textbox(label='Result')
        with gr.TabItem("Estimate blur"):
            with gr.Row():
                # Left column: live webcam feed plus the trigger button.
                with gr.Column():
                    blur_cam_input = gr.Image(sources="webcam", streaming=True)
                    blur_button = gr.Button("Estimate blur")
                # Right column: Laplacian visualization and the score text.
                with gr.Column():
                    laplacian_output = gr.Image(label="Laplacian filter view")
                    blurriness_estimation_text = gr.Textbox(label='Result')
    # Wire each button to its handler; outputs map 1:1 to the result widgets.
    eyes_button.click(detect_closed_eyes, inputs=[eye_cam_input], outputs=[detection_output, detection_output_text])
    blur_button.click(detect_blur, inputs=[blur_cam_input], outputs=[laplacian_output, blurriness_estimation_text])
|
|
|
|
|
if __name__ == "__main__":
    # Start the Gradio server when run as a script (not when imported).
    demo.launch()
|
|