Podtekatel committed
Commit: ef164a1
Parent(s): e0067df

Fix error for no objects

Files changed:
- app.py +6 -0
- inference/face_detector.py +3 -1
- inference/model_pipeline.py +3 -3
app.py
CHANGED
@@ -1,3 +1,4 @@
+import logging
 import os
 
 import gradio as gr
@@ -9,6 +10,11 @@ from inference.face_detector import StatRetinaFaceDetector
 from inference.model_pipeline import VSNetModelPipeline
 from inference.onnx_model import ONNXModel
 
+logging.basicConfig(
+    format='%(asctime)s %(levelname)-8s %(message)s',
+    level=logging.INFO,
+    datefmt='%Y-%m-%d %H:%M:%S')
+
 MODEL_IMG_SIZE = 256
 def load_model():
     REPO_ID = "Podtekatel/JJBAGAN"
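This change configures Python's standard logging at import time, so the logging.info calls introduced in inference/model_pipeline.py below emit timestamped records instead of bare print output. A standalone sketch of what the configured format produces (the message text is illustrative):

import logging

logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(message)s',
    level=logging.INFO,
    datefmt='%Y-%m-%d %H:%M:%S')

logging.info('model loaded')
# Emits a record of the form:
#   2022-08-30 12:00:00 INFO     model loaded
# (%(levelname)-8s left-justifies the level name in an 8-character field)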
inference/face_detector.py
CHANGED
@@ -96,7 +96,9 @@ class StatRetinaFaceDetector(FaceDetector):
     def detect_crops(self, img, *args, **kwargs):
         faces = RetinaFace.detect_faces(img, model=self.model)
         crops = []
-        for name, face in faces.items():
+        if len(faces) == 0:
+            faces = {}
+        for name, face in faces.items():
             x1, y1, x2, y2 = face['facial_area']
             crop = np.array([x1, y1, x2, y2])
             crops.append(crop)
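This is the substantive fix of the commit: when no face is found, RetinaFace.detect_faces evidently returns an empty value that is not a dict, so the old for ... in faces.items() raised on face-free images. Coercing the empty result to {} makes the loop a clean no-op and detect_crops returns an empty crop list. A minimal standalone sketch of the pattern, with a stand-in detector function (detect_fn is hypothetical, not the retinaface API):

import numpy as np

def detect_crops_safe(detect_fn, img):
    # detect_fn stands in for RetinaFace.detect_faces; assumed to return
    # {name: {'facial_area': [x1, y1, x2, y2]}} on success and an empty
    # non-dict value when nothing is detected.
    faces = detect_fn(img)
    if len(faces) == 0:  # the commit's guard: normalize "nothing found" to a dict
        faces = {}
    crops = []
    for name, face in faces.items():
        x1, y1, x2, y2 = face['facial_area']
        crops.append(np.array([x1, y1, x2, y2]))
    return crops

With detect_fn returning () or {}, this now yields [] instead of raising AttributeError on .items().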
inference/model_pipeline.py
CHANGED
@@ -1,3 +1,4 @@
+import logging
 import time
 
 import cv2
@@ -23,7 +24,6 @@ class VSNetModelPipeline:
 
         Y, X = np.ogrid[:h, :w]
         dist_from_center = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2)
-        print(dist_from_center.max())
         dist_from_center = np.clip(dist_from_center, a_min=0, a_max=max(h / 2, w / 2))
         dist_from_center = 1 - dist_from_center / np.max(dist_from_center)
         if power is not None:
@@ -86,7 +86,7 @@ class VSNetModelPipeline:
             out_faces = out_faces.transpose(0, 2, 3, 1)
             out_faces = np.clip(out_faces * 255, 0, 255).astype(np.uint8)
             end_time = time.time()
-            print(f'Face FPS {1 / (end_time - start_time)}')
+            logging.info(f'Face FPS {1 / (end_time - start_time)}')
         else:
             out_faces = []
             img = self.resize_size(img, size=self.no_detected_resize)
@@ -100,7 +100,7 @@ class VSNetModelPipeline:
         full_image = full_image.transpose(0, 2, 3, 1)
         full_image = np.clip(full_image * 255, 0, 255).astype(np.uint8)
         end_time = time.time()
-        print(f'Background FPS {1 / (end_time - start_time)}')
+        logging.info(f'Background FPS {1 / (end_time - start_time)}')
 
         result_image = self.merge_crops(out_faces, coords, full_image[0])
         return result_image
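Besides swapping the FPS prints for logging.info (so they respect the format configured in app.py), this file also drops a leftover debug print of dist_from_center.max() from the radial-mask helper. For context, the surrounding lines build a radial falloff mask: per-pixel distance from a center point, clipped to half the larger image side, then inverted so the mask is 1.0 at the center and decays to 0.0 at the clip radius. A self-contained sketch of that computation; the function name, signature, and the use of power as an exponent are assumptions, not the pipeline's actual code:

import numpy as np

def radial_falloff_mask(h, w, center, power=None):
    # Distance of every pixel from `center`, computed on open grids:
    # Y has shape (h, 1), X has shape (1, w), so dist broadcasts to (h, w).
    Y, X = np.ogrid[:h, :w]
    dist = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2)
    # Clip to half the larger side, then invert: 1.0 at the center,
    # 0.0 wherever the clipped distance reaches its maximum.
    dist = np.clip(dist, a_min=0, a_max=max(h / 2, w / 2))
    mask = 1 - dist / np.max(dist)
    if power is not None:
        # Assumed behavior of the `power` branch: an exponent that
        # sharpens or softens the falloff.
        mask = mask ** power
    return mask

mask = radial_falloff_mask(256, 256, center=(128, 128))
print(mask[128, 128], mask[128, 0])  # ~1.0 at the center, 0.0 at the clipped edge

Such a mask is a common way to blend a processed face crop back into the full image with a soft seam, which is consistent with the merge_crops call at the end of the hunk.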