sergey21000 committed
Commit • 545f568
1 Parent(s): 209c954
Upload 11 files
Browse files
- .gitattributes +2 -0
- Image_Detection.py +109 -0
- detector.py +263 -0
- draw_utils.py +238 -0
- fonts/LiberationMono-Regular.ttf +0 -0
- media/emotion.jpg +0 -0
- media/emotions_detect.png +3 -0
- media/result_video.mp4 +3 -0
- pages/1_Video_Detection.py +110 -0
- pages/2_Video_Detection_Results.py +39 -0
- pages/3_Web_Camera_Photo.py +75 -0
- requirements.txt +6 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+media/emotions_detect.png filter=lfs diff=lfs merge=lfs -text
+media/result_video.mp4 filter=lfs diff=lfs merge=lfs -text
Image_Detection.py
ADDED
@@ -0,0 +1,109 @@
+import io
+import cv2
+import numpy as np
+from PIL import Image
+import streamlit as st
+
+
+# ===================== Page settings ======================
+
+st.set_page_config(
+    layout='wide',
+    initial_sidebar_state='auto',
+    page_title='Face Emotion Recognition',
+    page_icon='👻',
+)
+
+st.write("#### Детекция лиц, эмоций, пола, расы и возраста на изображении")
+
+# display the image with detection examples
+@st.cache_data
+def load_main_image(image_path: str) -> Image.Image:
+    main_pil_image = Image.open(image_path)
+    return main_pil_image
+
+MAIN_IMAGE_PATH = './media/emotions_detect.png'
+main_pil_image = load_main_image(MAIN_IMAGE_PATH)
+st.image(main_pil_image, width=800)
+
+# ===================== Sidebar settings ======================
+
+st.sidebar.header('Настройки')
+st.sidebar.write('---')
+
+face_conf_threshold = st.sidebar.slider(
+    label='Порог уверенности для детекции лиц',
+    min_value=0.0,
+    max_value=1.0,
+    value=0.7,
+    step=0.01,
+)
+st.sidebar.write('---')
+
+# which attributes to detect (toggling them in the UI is not implemented yet)
+actions = ['age', 'gender', 'race', 'emotion']
+# whether to apply additional face alignment
+st.sidebar.write('Применять ли дополнительное выравнивание')
+align = st.sidebar.checkbox(label='Align', value=False)
+
+
+# download and initialize the models
+with st.spinner('Инициализация/загрузка моделей...'):
+    from detector import detector_model
+
+# ==================== Image upload and recognition ============
+
+st_image = st.file_uploader(label='Выберите изображение')
+st.session_state['detect_image_ready'] = False
+
+if st_image:
+    pil_image = Image.open(st_image)
+    np_image_rgb = np.array(pil_image)
+
+    np_image_bgr = cv2.cvtColor(np_image_rgb, cv2.COLOR_RGB2BGR)
+    st.image(np_image_bgr, width=400, channels='BGR')
+
+    # ================ Recognize button ==================
+
+    if st.button('Распознать'):
+        if detector_model.is_first_run:
+            spinner_text = 'Первоначальная инициализация моделей и распознавание фото...'
+        else:
+            spinner_text = 'Распознавание фото ...'
+
+        with st.spinner(spinner_text):
+            detections = detector_model.detect_image(
+                np_image_bgr,
+                actions=actions,
+                align=align,
+            )
+            detector_model.is_first_run = False
+
+        with st.spinner('Отрисовка результата'):
+            result_np_image = detector_model.draw_detections(
+                np_image_rgb=np_image_rgb,
+                detections=detections,
+                face_conf_threshold=face_conf_threshold,
+            )
+        st.session_state['detect_image_ready'] = True
+
+    # display the detection result
+    if st.session_state['detect_image_ready']:
+        st.image(result_np_image)
+
+        # save the result image so it can be downloaded
+        image_name = f"{st_image.name.split('.')[0]}_detect.png"
+        file_buffer = io.BytesIO()
+        result_pil_image = Image.fromarray(result_np_image)
+        result_pil_image.save(file_buffer, format='PNG')
+        image_bytes = file_buffer.getvalue()
+
+        # ================ Download button ==================
+
+        st.download_button(
+            label='Скачать изображение',
+            data=image_bytes,
+            file_name=image_name,
+            mime='image/png',
+        )
+        st.session_state['detect_image_ready'] = False
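Note on the pattern above: `detect_image_ready` is reset to False at the top of every Streamlit rerun, set to True only on the rerun triggered by the Recognize button, and consumed (then reset) later in that same rerun. A minimal standalone sketch of the idiom, with hypothetical names:

    import streamlit as st

    st.session_state['ready'] = False      # every rerun starts with a clean flag
    if st.button('Compute'):
        result = sum(range(10))            # heavy work runs only on the click's rerun
        st.session_state['ready'] = True
    if st.session_state['ready']:
        st.write(result)                   # rendered in the same rerun that computed it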
detector.py
ADDED
@@ -0,0 +1,263 @@
+import os
+from pathlib import Path
+from dataclasses import dataclass
+from typing import Generator
+
+import cv2
+import numpy as np
+import pandas as pd
+
+from PIL import Image, ImageDraw, ImageFont
+from ffmpeg import FFmpeg
+
+# set the directory the DeepFace models will be downloaded to
+MODELS_DIR = Path('models')
+os.environ['DEEPFACE_HOME'] = str(MODELS_DIR)
+
+
+# configure TensorFlow so it does not crash when memory is low
+import tensorflow as tf
+
+os.environ['TF_GPU_ALLOCATOR'] = 'cuda_malloc_async'
+gpus = tf.config.experimental.list_physical_devices('GPU')
+if gpus:
+    try:
+        for gpu in gpus:
+            tf.config.experimental.set_memory_growth(gpu, True)
+    except RuntimeError as ex:
+        print(ex)
+
+
+from deepface import DeepFace
+
+
+# the dictionary the DeepFace library returns for a single detected face
+DEEPFACE_DICT = dict[str, str | int | float | dict[str, str | int | float | None]]
+
+# dataclass for storing detection results
+# if no face was found (face_conf = 0), all values are set to None
+@dataclass
+class DetectResult:
+    face_conf: float | None = None
+    gender: str | None = None
+    gender_conf: float | None = None
+    age: int | None = None
+    race: str | None = None
+    race_conf: float | None = None
+    emotion: str | None = None
+    emotion_conf: float | None = None
+    x: int | None = None
+    y: int | None = None
+    w: int | None = None
+    h: int | None = None
+
+
+    # if no face was found (face_conf = 0), reset all values to None
+    def __post_init__(self):
+        if self.face_conf == 0:
+            self.__dict__.update(DetectResult().__dict__)
+
+
+    # build an instance from the dictionary the DeepFace library returns
+    @classmethod
+    def from_dict(cls, detect_dict: DEEPFACE_DICT):
+        return cls(
+            face_conf=detect_dict['face_confidence'],
+            gender=detect_dict['dominant_gender'],
+            gender_conf=detect_dict['gender'][detect_dict['dominant_gender']],
+            age=detect_dict['age'],
+            race=detect_dict['dominant_race'],
+            race_conf=detect_dict['race'][detect_dict['dominant_race']],
+            emotion=detect_dict['dominant_emotion'],
+            emotion_conf=detect_dict['emotion'][detect_dict['dominant_emotion']],
+            x=detect_dict['region']['x'],
+            y=detect_dict['region']['y'],
+            w=detect_dict['region']['w'],
+            h=detect_dict['region']['h'],
+        )
+
+
+    # return the current instance as a dictionary
+    def as_dict(self):
+        return self.__dict__
+
+
+    # get the attribute names of the instance
+    def keys(self):
+        return self.__dict__.keys()
+
+
+    # customize the text representation of the instance
+    def __repr__(self):
+        if self.face_conf is None:
+            repr_text = 'DetectResult(face_conf=None'
+        else:
+            repr_text = 'DetectResult(\n    ' + \
+                '\n    '.join([f'{k}={v}' for k, v in self.as_dict().items()])
+        return repr_text + '\n)\n'
+
+
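For orientation: `from_dict` above consumes one element of the list returned by `DeepFace.analyze`. Judging by the keys it accesses, each element is shaped roughly as follows (values are illustrative, not real output):

    detect_dict = {
        'face_confidence': 0.93,
        'age': 31,
        'dominant_gender': 'Woman',
        'gender': {'Woman': 99.1, 'Man': 0.9},
        'dominant_race': 'asian',
        'race': {'asian': 87.2, 'white': 6.1},        # abridged: one score per race class
        'dominant_emotion': 'happy',
        'emotion': {'happy': 95.4, 'neutral': 3.2},   # abridged: one score per emotion class
        'region': {'x': 120, 'y': 64, 'w': 180, 'h': 180},
    }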
+class Detector:
+    def __init__(self):
+        self.box_color = 'red'
+        self.text_color = 'yellow'
+        self.fill_color = 'black'
+        self.font_path = 'fonts/LiberationMono-Regular.ttf'
+        self.font_scale = 40  # the smaller the value, the larger the font
+        self.detections_all_frames = []
+        self.save_video_path = 'result_video.mp4'
+        self.result_csv_path = 'video_annotations.csv'
+        # opencv, yunet, centerface, dlib, ssd, fastmtcnn
+        self.detector_backend = 'ssd'
+        weights_dir = MODELS_DIR / '.deepface' / 'weights'
+        self.is_first_run = True
+
+        if Path(self.result_csv_path).is_file():
+            Path(self.result_csv_path).unlink(missing_ok=True)
+
+        if len(list(weights_dir.iterdir())) == 0:
+            self.first_detect_and_load_weights()
+
+
+    # run detection on a random noise image so the weights are downloaded on the first launch
+    def first_detect_and_load_weights(self):
+        DeepFace.analyze(
+            img_path=np.random.randint(0, 256, size=(28, 28, 3), dtype=np.uint8),
+            actions=['age', 'gender', 'race', 'emotion'],
+            detector_backend=self.detector_backend,
+            align=False,
+            enforce_detection=False,
+            silent=True,
+        )
+
+
+    # detect a single image; returns a list of detection results
+    def detect_image(self, image: str | np.ndarray, actions: list[str], align: bool) -> list[DetectResult]:
+        detection_dicts = DeepFace.analyze(
+            img_path=image,
+            actions=actions,
+            detector_backend=self.detector_backend,
+            align=align,
+            enforce_detection=False,
+            silent=True,
+        )
+        detections = [DetectResult.from_dict(detection) for detection in detection_dicts]
+        return detections
+
+
+    # draw the detection results
+    def draw_detections(
+        self,
+        np_image_rgb: np.ndarray,
+        detections: list[DetectResult],
+        face_conf_threshold: float,
+    ) -> np.ndarray:
+        pil_image = Image.fromarray(np_image_rgb)
+        draw = ImageDraw.Draw(pil_image)
+        font_size = pil_image.size[1] // self.font_scale
+        # font = ImageFont.load_default(size=font_size)
+        font = ImageFont.truetype(self.font_path, size=font_size)
+        for detection in detections:
+            if detection.face_conf is not None:
+                if detection.face_conf > face_conf_threshold:
+                    x, y, w, h = detection.x, detection.y, detection.w, detection.h
+                    text = f'{detection.gender},{detection.age},{detection.race},{detection.emotion}'
+                    draw.rectangle((x, y, x + w, y + h), outline=self.box_color, width=6)
+                    _, _, text_width, text_height = font.getbbox(text)
+                    if (x + text_width) > pil_image.size[0]:
+                        x -= text_width / 2
+                    draw.rectangle(xy=(x, y - text_height, x + text_width, y), fill=self.fill_color)
+                    draw.text(xy=(x, y), text=text, font=font, fill=self.text_color, anchor='lb')
+        return np.array(pil_image)
+
+
+    # generator function for video detection: processes the video frame by frame in a loop
+    # yields the current frame number and the total frame count so the caller can drive a progress bar
+    def detect_video(
+        self,
+        video_file: str | Path,
+        actions: list[str],
+        align: bool,
+        face_conf_threshold: float,
+    ) -> Generator[tuple[int, int], None, None]:
+
+        cap_read = cv2.VideoCapture(str(video_file))
+        frames_width = int(cap_read.get(cv2.CAP_PROP_FRAME_WIDTH))
+        frames_height = int(cap_read.get(cv2.CAP_PROP_FRAME_HEIGHT))
+        frames_fps = int(cap_read.get(cv2.CAP_PROP_FPS))
+        total_frames = int(cap_read.get(cv2.CAP_PROP_FRAME_COUNT))
+
+        cap_write = cv2.VideoWriter(
+            filename=self.save_video_path,
+            fourcc=cv2.VideoWriter_fourcc(*'mp4v'),
+            fps=frames_fps,
+            frameSize=(frames_width, frames_height),
+        )
+
+        self.detections_all_frames = []
+        frames_count = 0
+
+        while cap_read.isOpened():
+            ret, np_image_bgr = cap_read.read()
+            if not ret:
+                break
+            detections = self.detect_image(
+                image=np_image_bgr,
+                actions=actions,
+                align=align,
+            )
+            self.detections_all_frames.append(detections)
+
+            if detections[0].face_conf is not None:
+                np_image_rgb = cv2.cvtColor(np_image_bgr, cv2.COLOR_BGR2RGB)
+                result_np_image = self.draw_detections(
+                    np_image_rgb=np_image_rgb,
+                    detections=detections,
+                    face_conf_threshold=face_conf_threshold,
+                )
+                np_image_bgr = cv2.cvtColor(result_np_image, cv2.COLOR_RGB2BGR)
+
+            cap_write.write(np_image_bgr)
+            frames_count += 1
+            yield (frames_count, total_frames)
+
+        cap_write.release()
+        cap_read.release()
+
+
+    # build a dataframe from the detection result dictionaries
+    # each dataframe row corresponds to a frame number of the video
+    # if nothing is detected on a frame, all values except the frame number and timestamp are None
+    # if a frame contains several faces, its frame number and timestamp are duplicated
+    def detections_to_df(self) -> str | None:  # returns the path of the saved csv
+        if not Path(self.save_video_path).exists():
+            print('Нет видео с результатами детекции')
+            return None
+
+        if len(self.detections_all_frames) == 0:
+            print('Не найдено ни одного объекта')
+            return None
+
+        cap_read = cv2.VideoCapture(str(self.save_video_path))
+        frames_fps = int(cap_read.get(cv2.CAP_PROP_FPS))
+        cap_read.release()
+
+        df_list = []
+        for frame_num, detections in enumerate(self.detections_all_frames, start=1):
+            for detection in detections:
+                df_list.append({'frame_num': frame_num, **detection.as_dict()})
+
+        df = pd.DataFrame(df_list)
+        df.insert(loc=1, column='frame_sec', value=df.frame_num / frames_fps)
+        df['face_detected'] = df['gender'].notna().astype(int)
+        df.to_csv(self.result_csv_path, index=False)
+        return self.result_csv_path
+
+
+    # convert to mp4
+    @staticmethod
+    def convert_mp4(input_video_path: str | Path, output_video_path: str | Path) -> None:
+        ffmpeg = FFmpeg().option('y').input(input_video_path).output(output_video_path)
+        ffmpeg.execute()
+
+detector_model = Detector()
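The module exposes a single shared `detector_model` instance, so the Streamlit pages (and any ad-hoc script) reuse the same loaded models. A minimal usage sketch, assuming an image exists at media/emotion.jpg:

    import cv2
    from detector import detector_model

    np_image_bgr = cv2.imread('media/emotion.jpg')  # detect_image accepts a BGR array or a path
    detections = detector_model.detect_image(
        np_image_bgr,
        actions=['age', 'gender', 'race', 'emotion'],
        align=False,
    )
    for detection in detections:
        print(detection)  # the custom __repr__ prints one attribute per line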
draw_utils.py
ADDED
@@ -0,0 +1,238 @@
+import pandas as pd
+import seaborn as sns
+import matplotlib.pyplot as plt
+
+import streamlit as st
+
+
+plt.style.use('dark_background')
+plt.rcParams.update({'figure.figsize': (7, 1.5)})
+plt.rcParams.update({'font.size': 5})
+
+
+def draw_plots(df: pd.DataFrame) -> None:
+    df_clean = df.dropna(subset=['face_conf'])
+
+    st.write('---')
+    st.write('Количество детекций по возрасту и по уверенности модели')
+
+    fig, axes = plt.subplots(1, 2)
+    sns.histplot(df_clean['age'], kde=True, ax=axes[0])
+    axes[0].set_title('Распределение возраста')
+    axes[0].set_xlabel('Возраст')
+    axes[0].set_ylabel('Количество обнаружений')
+    sns.histplot(df_clean['face_conf'], kde=True, ax=axes[1])
+    axes[1].set_title('Распределение уверенности детекций')
+    axes[1].set_xlabel('Уверенность')
+    axes[1].set_ylabel('Количество обнаружений')
+
+
+    st.pyplot(fig, use_container_width=False)
+    st.write('---')
+    st.write('Распределение уверенности модели по классам пола и эмоций')
+
+
+    fig, axes = plt.subplots(1, 2)
+    sns.boxplot(
+        data=df_clean,
+        x='gender',
+        y='face_conf',
+        hue='gender',
+        palette='hls',
+        ax=axes[0],
+    )
+    axes[0].set_title('Распределение уверенности детекций по полу')
+    axes[0].set_xlabel('Пол')
+    axes[0].set_ylabel('Уверенность')
+    # axes[0].tick_params(axis='x', labelrotation=45)
+    sns.boxplot(
+        data=df_clean,
+        x='emotion',
+        y='face_conf',
+        hue='emotion',
+        palette='hls',
+        ax=axes[1],
+    )
+    axes[1].set_title('Распределение уверенности детекций по эмоциям')
+    axes[1].set_xlabel('Эмоция')
+    axes[1].set_ylabel('Уверенность')
+
+
+    st.pyplot(fig, use_container_width=False)
+    st.write('---')
+    st.write('Количество детекций по классам эмоций и пола')
+
+
+    fig, axes = plt.subplots(1, 2)
+    sns.countplot(
+        data=df_clean,
+        x='emotion',
+        hue='emotion',
+        order=df_clean['emotion'].value_counts().index,
+        palette='viridis',
+        legend=False,
+        ax=axes[0],
+    )
+    axes[0].set_title('Количество обнаружений эмоций')
+    axes[0].set_xlabel('Эмоция')
+    axes[0].set_ylabel('Количество')
+    sns.countplot(
+        data=df_clean,
+        x='gender',
+        hue='gender',
+        order=df_clean['gender'].value_counts().index,
+        palette='Set2',
+        legend=False,
+        ax=axes[1],
+    )
+    axes[1].set_title('Количество обнаружений пола')
+    axes[1].set_xlabel('Пол')
+    axes[1].set_ylabel('Количество')
+
+
+    st.pyplot(fig, use_container_width=False)
+    st.write('---')
+    st.write('Количество детекций по классу эмоций в зависимости от пола и расы')
+
+
+    fig, axes = plt.subplots(1, 2)
+    sns.countplot(
+        data=df_clean,
+        x='emotion',
+        hue='gender',
+        palette='viridis',
+        order=df_clean['emotion'].value_counts().index,
+        ax=axes[0],
+    )
+    axes[0].set_title('Распределение пола по эмоциям')
+    axes[0].set_xlabel('Эмоция')
+    axes[0].set_ylabel('Количество')
+    axes[0].legend(title='Пол')
+    sns.countplot(
+        data=df_clean,
+        x='emotion',
+        hue='race',
+        palette='viridis',
+        order=df_clean['emotion'].value_counts().index,
+        ax=axes[1],
+    )
+    axes[1].set_title('Распределение рас по эмоциям')
+    axes[1].set_xlabel('Эмоция')
+    axes[1].set_ylabel('Количество')
+    axes[1].legend(title='Раса')
+
+
+    st.pyplot(fig, use_container_width=False)
+    st.write('---')
+    st.write('Кадры и секунды с наибольшим количеством детекций')
+
+
+    fig, axes = plt.subplots(1, 2)
+    face_count_per_frame = df.groupby('frame_num')['face_detected'].sum()
+    axes[0].plot(face_count_per_frame.index, face_count_per_frame.values, marker='o', linestyle='-')
+    axes[0].set_title('Частота обнаружения лиц по кадрам')
+    axes[0].set_xlabel('Номер кадра')
+    axes[0].set_ylabel('Количество обнаруженных лиц')
+    face_count_per_frame = df.groupby('frame_sec')['face_detected'].sum()
+    axes[1].plot(face_count_per_frame.index, face_count_per_frame.values, marker='o', linestyle='-')
+    axes[1].set_title('Частота обнаружения лиц по секундам')
+    axes[1].set_xlabel('Время (сек)')
+    axes[1].set_ylabel('Количество обнаруженных лиц')
+
+
+    st.pyplot(fig, use_container_width=False)
+    st.write('---')
+    st.write('Какие классы в какое время были обнаружены (по эмоциям)')
+
+
+    fig, ax = plt.subplots(1, 1)
+    sns.scatterplot(
+        data=df_clean,
+        x='frame_sec',
+        y='emotion',
+        hue='emotion',
+        palette='deep',
+        s=50,
+        alpha=0.6,
+        legend=True,
+        ax=ax,
+    )
+    ax.set_title('Временная шкала обнаружения лиц по эмоциям')
+    ax.set_xlabel('Время видео (секунды)')
+    ax.set_ylabel('Эмоция')
+    ax.grid(True, linestyle='--', alpha=0.7)
+    ax.legend(title='Классы эмоций', bbox_to_anchor=(1.05, 1), loc='upper left')
+
+
+    st.pyplot(fig, use_container_width=False)
+    st.write('---')
+    st.write('Какие классы в какое время были обнаружены (по полу)')
+
+
+    fig, ax = plt.subplots()
+    sns.scatterplot(
+        data=df_clean,
+        x='frame_sec',
+        y='gender',
+        hue='gender',
+        palette='deep',
+        s=50,
+        alpha=0.6,
+        legend=True,
+        ax=ax,
+    )
+    ax.set_title('Временная шкала обнаружения лиц по полу')
+    ax.set_xlabel('Время видео (секунды)')
+    ax.set_ylabel('Пол')
+    ax.grid(True, linestyle='--', alpha=0.7)
+    ax.legend(title='Классы пола', bbox_to_anchor=(1.05, 1), loc='upper left')
+
+
+    st.pyplot(fig, use_container_width=False)
+    st.write('---')
+    st.write('Какие классы в какое время были обнаружены (по расе)')
+
+
+    fig, ax = plt.subplots()
+    sns.scatterplot(
+        data=df_clean,
+        x='frame_sec',
+        y='race',
+        hue='race',
+        palette='deep',
+        s=50,
+        alpha=0.6,
+        legend=True,
+        ax=ax,
+    )
+    ax.set_title('Временная шкала обнаружения лиц по расе')
+    ax.set_xlabel('Время видео (секунды)')
+    ax.set_ylabel('Раса')
+    ax.grid(True, linestyle='--', alpha=0.7)
+    ax.legend(title='Классы расы', bbox_to_anchor=(1.05, 1), loc='upper left')
+
+
+    st.pyplot(fig, use_container_width=False)
+    st.write('---')
+    st.write('Распределение смены эмоций по времени и кол-ву детекций')
+
+
+    fig, ax = plt.subplots()
+    emotion_timeline = df.pivot_table(
+        index='frame_sec',
+        columns='emotion',
+        aggfunc='size',
+        fill_value=0,
+    )
+    emotion_timeline.plot(kind='area', stacked=True, ax=ax)
+    ax.set_title('Изменение эмоций во времени')
+    ax.set_xlabel('Время видео (секунды)')
+    ax.set_ylabel('Количество детекций')
+    ax.grid(True, linestyle='--', alpha=0.7)
+    ax.legend(title='Эмоции', bbox_to_anchor=(1.05, 1), loc='upper left')
+
+
+    st.pyplot(fig, use_container_width=False)
+    st.write('---')
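Because `draw_plots` only needs a dataframe with the annotation columns, the dashboard can be previewed against a previously saved CSV without rerunning detection; a sketch, assuming video_annotations.csv already exists (run it with `streamlit run`):

    import pandas as pd
    from draw_utils import draw_plots

    df = pd.read_csv('video_annotations.csv')  # produced by Detector.detections_to_df
    draw_plots(df)                             # renders all the panels defined above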
fonts/LiberationMono-Regular.ttf
ADDED
Binary file (108 kB)
media/emotion.jpg
ADDED
media/emotions_detect.png
ADDED
Git LFS Details
media/result_video.mp4
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df2012d876a8b65ce2f5b8a9d31c9bf89a2e254f82763657331ef12aacf809ce
+size 3309385
pages/1_Video_Detection.py
ADDED
@@ -0,0 +1,110 @@
+import tempfile
+
+import streamlit as st
+
+
+# ===================== Page settings ======================
+
+st.set_page_config(
+    layout='wide',
+    initial_sidebar_state='auto',
+    page_title='Face Emotion Recognition',
+    page_icon='👻',
+)
+
+st.write("#### Детекция лиц, эмоций, пола, расы и возраста на видео")
+
+# load and display a video with a detection example
+@st.cache_data
+def load_main_video(video_path: str) -> bytes:
+    with open(video_path, 'rb') as file:
+        video_bytes = file.read()
+    return video_bytes
+
+MAIN_VIDEO_PATH = 'media/result_video.mp4'
+example_video_bytes = load_main_video(MAIN_VIDEO_PATH)
+
+video_width = 60
+video_side = 100
+
+_, container, _ = st.columns([video_side, video_width, video_side])
+container.video(data=example_video_bytes)
+
+# ===================== Sidebar settings ======================
+
+st.sidebar.header('Настройки')
+st.sidebar.write('---')
+
+face_conf_threshold = st.sidebar.slider(
+    label='Порог уверенности для детекции лиц',
+    min_value=0.0,
+    max_value=1.0,
+    value=0.7,
+    step=0.01,
+)
+st.sidebar.write('---')
+
+# which attributes to detect (toggling them in the UI is not implemented yet)
+actions = ['age', 'gender', 'race', 'emotion']
+# whether to apply additional face alignment
+st.sidebar.write('Применять ли дополнительное выравнивание')
+align = st.sidebar.checkbox(label='Align', value=False)
+
+
+# download and initialize the models
+with st.spinner('Инициализация/загрузка моделей...'):
+    from detector import detector_model
+
+# ==================== Video upload and detection =================
+
+st_video = st.file_uploader(label='Выберите видео')
+st.session_state['video_ready_to_convert'] = False
+st.session_state['annotations_ready'] = False
+
+if st_video:
+    if st.button('Детекция видео'):
+        progress_text = 'Детекция видео...'
+        progress_bar = st.progress(0, text=progress_text)
+
+        temp_file = tempfile.NamedTemporaryFile(delete=False)
+        temp_file.write(st_video.read())
+        video_file = temp_file.name
+
+        generator = detector_model.detect_video(
+            video_file=video_file,
+            actions=actions,
+            align=align,
+            face_conf_threshold=face_conf_threshold,
+        )
+        frame_count, total_frames = next(generator)
+        for (frame_count, _) in generator:
+            progress_text = f'Детекция видео, кадр {frame_count}/{total_frames}'
+            progress_bar.progress(frame_count / total_frames, text=progress_text)
+
+        progress_bar.empty()
+        st.session_state['video_ready_to_convert'] = True
+        detector_model.detections_to_df()
+
+# ======= Convert the video so the browser can play it =================
+
+if st.session_state['video_ready_to_convert']:
+    convert_video_path = 'result_video_convert.mp4'
+    with st.spinner('Идет конвертация видео ...'):
+        detector_model.convert_mp4(detector_model.save_video_path, convert_video_path)
+    # st.video(convert_video_path, format='video/mp4')
+
+    with open(str(convert_video_path), 'rb') as file:
+        video_bytes = file.read()
+
+    _, container, _ = st.columns([video_side, video_width, video_side])
+    container.video(data=video_bytes)
+
+    # ======================= Download video button ===================
+
+    st.download_button(
+        label='Скачать видео',
+        data=video_bytes,
+        file_name=detector_model.save_video_path,
+    )
+    st.session_state['video_ready_to_convert'] = False
+
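The extra conversion step exists because `cv2.VideoWriter` with the `mp4v` fourcc emits MPEG-4 Part 2 video, which most browsers will not play inline; re-encoding through ffmpeg yields an H.264 mp4 that `st.video` can render. A sketch of the same re-encode with the video codec made explicit (the helper itself relies on ffmpeg's defaults):

    from ffmpeg import FFmpeg

    (
        FFmpeg()
        .option('y')  # overwrite the output file if it exists
        .input('result_video.mp4')
        .output('result_video_convert.mp4', {'codec:v': 'libx264'})
        .execute()
    )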
pages/2_Video_Detection_Results.py
ADDED
@@ -0,0 +1,39 @@
+from pathlib import Path
+
+import pandas as pd
+import streamlit as st
+
+from draw_utils import draw_plots
+
+
+st.set_page_config(
+    layout='wide',
+    initial_sidebar_state='auto',
+    page_title='Face Emotion Recognition',
+    page_icon='👻',
+)
+
+
+st.write("#### Отображение результатов детекции видео")
+
+csv_path = 'video_annotations.csv'
+
+@st.cache_data
+def load_df():
+    df = pd.read_csv(csv_path)
+    return df
+
+if Path(csv_path).exists():
+    df = load_df()
+    st.download_button(
+        label='Скачать csv аннотации',
+        data=df.to_csv(index=False).encode('utf-8'),
+        file_name=csv_path,
+        mime='text/csv',
+    )
+    draw_plots(df)
+else:
+    st.write(
+        '#### Для отображения результатов детекции выберите видео '
+        'на вкладке Video Detection и нажмите "Детекция видео"'
+    )
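The columns of video_annotations.csv follow directly from the `DetectResult` fields plus the two columns `detections_to_df` inserts, which is the schema `draw_plots` relies on; a quick sanity check:

    import pandas as pd

    df = pd.read_csv('video_annotations.csv')
    expected = [
        'frame_num', 'frame_sec', 'face_conf', 'gender', 'gender_conf', 'age',
        'race', 'race_conf', 'emotion', 'emotion_conf', 'x', 'y', 'w', 'h',
        'face_detected',
    ]
    assert list(df.columns) == expected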
pages/3_Web_Camera_Photo.py
ADDED
@@ -0,0 +1,75 @@
+import cv2
+import numpy as np
+from PIL import Image
+import streamlit as st
+
+from detector import detector_model
+
+
+# ===================== Page settings ======================
+
+st.set_page_config(
+    layout='wide',
+    initial_sidebar_state='auto',
+    page_title='Face Emotion Recognition',
+    page_icon='👻',
+)
+
+
+st.write("#### Детекция лиц, эмоций, пола и возраста с веб-камеры")
+
+# ===================== Sidebar settings ======================
+
+st.sidebar.header('Настройки')
+st.sidebar.write('---')
+
+face_conf_threshold = st.sidebar.slider(
+    label='Порог уверенности для детекции лиц',
+    min_value=0.0,
+    max_value=1.0,
+    value=0.7,
+    step=0.01,
+)
+st.sidebar.write('---')
+
+# which attributes to detect (toggling them in the UI is not implemented yet)
+actions = ['age', 'gender', 'race', 'emotion']
+# whether to apply additional face alignment
+st.sidebar.write('Применять ли дополнительное выравнивание')
+align = st.sidebar.checkbox(label='Align', value=False)
+
+# ================= Reading the camera stream ===============
+
+st_image = st.camera_input("Сделать фото")
+st.session_state['detect_image_ready'] = False
+
+# ====================== Detection ===========================
+
+if st_image:
+    pil_image = Image.open(st_image)
+    np_image_rgb = np.array(pil_image)
+
+    np_image_bgr = cv2.cvtColor(np_image_rgb, cv2.COLOR_RGB2BGR)
+    with st.spinner('Распознавание фото'):
+        detections = detector_model.detect_image(
+            np_image_bgr,
+            actions=actions,
+            align=align,
+        )
+
+    with st.spinner('Отрисовка результата'):
+        result_np_image = detector_model.draw_detections(
+            np_image_rgb=np_image_rgb,
+            detections=detections,
+            face_conf_threshold=face_conf_threshold,
+        )
+    st.session_state['detect_image_ready'] = True
+
+# ============= Drawing the result ======================
+
+if st_image and st.session_state['detect_image_ready']:
+    col1, col2 = st.columns(2)
+    with col1:
+        st.image(np_image_rgb, caption='Исходное фото', use_column_width=True)
+    with col2:
+        st.image(result_np_image, caption='Результат детекции', use_column_width=True)
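`st.camera_input` returns the captured frame as the same file-like `UploadedFile` object the uploader on the image page yields, so both pages share one decode path; a sketch of that step as a helper (the function name is hypothetical):

    import cv2
    import numpy as np
    from PIL import Image

    def to_bgr(uploaded_file) -> np.ndarray:
        np_image_rgb = np.array(Image.open(uploaded_file))    # PIL decodes the PNG to RGB
        return cv2.cvtColor(np_image_rgb, cv2.COLOR_RGB2BGR)  # detect_image expects BGR, like OpenCV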
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+tensorflow==2.15
+tf-keras==2.15
+deepface==0.0.93
+streamlit==1.39.0
+python-ffmpeg==2.0.12
+seaborn==0.13.2