import numpy as np
import cv2
import pandas as pd
import tensorflow as tf
from tensorflow import keras
import time
from playsound import playsound
import streamlit as st

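# OpenCV's bundled Haar cascade is used to locate the face in each webcam frame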
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

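# Keras eye-state classifier; per the Home page it was trained on the Kaggle drowsiness-detection dataset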
model = keras.models.load_model('my_model (1).h5')

st.title('Drowsiness Detection')
img = []

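# Sidebar radio button switches between the app's three pages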
nav_choice = st.sidebar.radio('Navigation', ('Home', 'Sleep Detection', 'Help Us Improve'), index=0)

if nav_choice == 'Home':
    st.header('Preventing Sleep Deprivation Road Accidents')
    st.image('ISHN0619_C3_pic.jpg')
    st.markdown("""
    According to a survey by the Times of India, approximately 40% of road accidents are caused by sleep-deprived and fatigued drivers.
    This app aims to address this issue by alerting drowsy drivers using deep learning models and computer vision.
    """)
    st.image('sleep.jfif', width=300)
    st.markdown("""
    ### How to Use?
    1. Go to the Sleep Detection page from the Navigation Sidebar.
    2. Ensure you have sufficient lighting in your room.
    3. Position yourself so that you are clearly visible in the webcam and stay close to it.
    4. Keep your eyes in the same state (open or closed) for about 5 seconds while the webcam captures three pictures.
    5. If your eyes are closed, the model will trigger a custom sound alert.
    6. Otherwise, the model will continue monitoring your eyes at regular intervals.

    **Note:** The dataset used for training the model is available [here](https://www.kaggle.com/kutaykutlu/drowsiness-detection).
    """)

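# Sleep Detection page: capture webcam frames, classify the eye state, and raise an alarm if the eyes stay closed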
elif nav_choice == 'Sleep Detection':
    st.header('Image Prediction')
    st.success('Please look at your webcam and follow the instructions provided on the Home page.')
    st.warning('Keeping your eyes in the same state is crucial. You can blink if your eyes are open!')
    # Purely decorative progress bar shown while the page loads
    b = st.progress(0)
    for i in range(100):
        time.sleep(0.0001)
        b.progress(i + 1)

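    # The radio defaults to 'Stop' (index=1); detection only runs once the user selects 'Start'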
    start = st.radio('Options', ('Start', 'Stop'), key='Start_pred', index=1)

    if start == 'Start':
        decision = 0
        st.markdown('<font face="Comic sans MS"><b>Detected Facial Region of Interest (ROI)      Extracted'
                    ' Eye Features from the ROI</b></font>', unsafe_allow_html=True)

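        # Take three snapshots; `decision` counts how many of them show open eyes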
        for _ in range(3):
            cap = cv2.VideoCapture(0)
            ret, frame = cap.read()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)

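            # Draw a box around each detected face and crop the face region of interest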
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 5)
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = frame[y:y + h, x:x + w]
                frame1 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

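            # If no face was detected, roi_color is never assigned and the NameError handler below asks the user to move closer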
            try:
                # shape[:2] is (height, width), so take the vertical and horizontal centre of the face ROI
                centy, centx = roi_color.shape[:2]
                centx //= 2
                centy //= 2
                # Crop fixed-size patches on either side of the ROI centre as the left and right eyes
                eye_1 = roi_color[centy - 40: centy, centx - 70: centx]
                eye_1 = cv2.resize(eye_1, (86, 86))
                eye_2 = roi_color[centy - 40: centy, centx: centx + 70]
                eye_2 = cv2.resize(eye_2, (86, 86))
                cv2.rectangle(frame1, (x + centx - 60, y + centy - 40), (x + centx - 10, y + centy), (0, 255, 0), 5)
                cv2.rectangle(frame1, (x + centx + 10, y + centy - 40), (x + centx + 60, y + centy), (0, 255, 0), 5)
                # Run the classifier on each eye patch; argmax gives the predicted eye-state class
                preds_eye1 = model.predict(np.expand_dims(eye_1, axis=0))
                preds_eye2 = model.predict(np.expand_dims(eye_2, axis=0))
                e1, e2 = np.argmax(preds_eye1), np.argmax(preds_eye2)

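                # Display the annotated frame and the two eye crops side by side
                # (st.beta_columns was renamed to st.columns in later Streamlit releases)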
                img_container = st.beta_columns(4)
                img_container[0].image(frame1, width=250)
                img_container[2].image(cv2.cvtColor(eye_1, cv2.COLOR_BGR2RGB), width=150)
                img_container[3].image(cv2.cvtColor(eye_2, cv2.COLOR_BGR2RGB), width=150)
                print(e1, e2)

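                # Class 1 appears to correspond to a closed eye: the frame only counts as "awake" if neither eye is predicted as class 1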
                if e1 == 1 or e2 == 1:
                    pass
                else:
                    decision += 1

            except NameError:
                # roi_color was never assigned because no face was detected in this frame
                st.warning('Hold your camera closer!!!\nTrying again in 2s')
                cap.release()
                time.sleep(2)
                continue

            except:
                # Any other failure (e.g. an empty eye crop): skip this frame and retry
                cap.release()
                continue

            finally:
                # Always release the webcam so the next iteration can reopen it
                cap.release()

|
|
if decision == 0: |
|
st.error('Eye(s) are closed') |
|
playsound("232857-84052cf6-66a1-4c60-ad86-9ebc19eaab52.mp3") |
|
|
|
else: |
|
st.success('Eyes are Opened') |
|
st.warning('Please select "Stop" and then "Start" to try again') |
|
|
|
|
|
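# Help Us Improve page: collect user-submitted images to extend the training data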
else:
    st.header('Help Us Improve')
    st.success('We would appreciate your Help!!!')
    st.markdown("""
    To improve this app, we need your feedback and contributions.
    As part of our efforts, we would like to gather more data.
    This will help us enhance the accuracy and usability of the app.
    Your identity will remain anonymous, and only your eye-patch will be extracted for analysis.
    """)

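    # st.file_uploader returns an in-memory file-like object, not a path on disk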
    img_upload = st.file_uploader('Upload Image Here', ['png', 'jpg', 'jpeg'])
    if img_upload is not None:
        prog = st.progress(0)
        # cv2.imread expects a file path, so decode the uploaded bytes in memory instead
        file_bytes = np.frombuffer(img_upload.read(), np.uint8)
        to_add = cv2.imdecode(file_bytes, 0)
        to_add = pd.DataFrame(to_add)

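        # Append the grayscale pixel matrix to the shared contributions CSV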
        to_add.to_csv('Data_from_users.csv', mode='a', header=False, index=False, sep=';')
        for i in range(100):
            time.sleep(0.001)
            prog.progress(i + 1)
        st.success('Uploaded Successfully!!! Thank you for contributing.')