ElenaRyumina committed
Commit • cf60969
Parent(s): 485063f
Update

Browse files
- app/app_utils.py +6 -6
- app/model.py +2 -2
- config.toml +0 -1
- result.mp4 +0 -0
app/app_utils.py
CHANGED
@@ -42,9 +42,9 @@ def preprocess_image_and_predict(inp):
         for fl in results.multi_face_landmarks:
             startX, startY, endX, endY = get_box(fl, w, h)
             cur_face = inp[startY:endY, startX:endX]
-            cur_face_n = pth_processing(Image.fromarray(cur_face))
+            cur_face_n = pth_processing(Image.fromarray(cur_face))
             prediction = (
-                torch.nn.functional.softmax(pth_model_static(cur_face_n), dim=1)
+                torch.nn.functional.softmax(pth_model_static(cur_face_n), dim=1)
                 .detach()
                 .numpy()[0]
             )
@@ -91,8 +91,8 @@ def preprocess_video_and_predict(video):
             cur_face = frame_copy[startY:endY, startX: endX]

             if (count_frame-1)%config_data.FRAME_DOWNSAMPLING == 0:
-                cur_face_copy = pth_processing(Image.fromarray(cur_face))
-                features = torch.nn.functional.relu(pth_model_static.extract_features(cur_face_copy)).
+                cur_face_copy = pth_processing(Image.fromarray(cur_face))
+                features = torch.nn.functional.relu(pth_model_static.extract_features(cur_face_copy)).detach().numpy()

                 if len(lstm_features) == 0:
                     lstm_features = [features]*10
@@ -100,8 +100,8 @@ def preprocess_video_and_predict(video):
                 lstm_features = lstm_features[1:] + [features]

                 lstm_f = torch.from_numpy(np.vstack(lstm_features))
-                lstm_f = torch.unsqueeze(lstm_f, 0)
-                output = pth_model_dynamic(lstm_f).
+                lstm_f = torch.unsqueeze(lstm_f, 0)
+                output = pth_model_dynamic(lstm_f).detach().numpy()
                 last_output = output
             else:
                 if last_output is not None:
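For context, the video path touched by this diff keeps a rolling window of the ten most recent per-frame feature vectors and feeds the stacked window to the temporal model. Below is a minimal sketch of that buffering logic, assuming pth_model_static.extract_features returns a (1, D) torch tensor, pth_model_dynamic accepts a (1, 10, D) batch, and both models (from app/model.py) are in scope; update_and_predict is a hypothetical wrapper, and its body mirrors the new lines above.

import numpy as np
import torch

lstm_features = []  # rolling buffer of the ten most recent feature vectors

def update_and_predict(cur_face_copy):
    # Hypothetical wrapper around the buffering logic shown in the diff
    global lstm_features

    # Static backbone features, ReLU'd and detached to NumPy as in the new code
    features = torch.nn.functional.relu(
        pth_model_static.extract_features(cur_face_copy)
    ).detach().numpy()

    if len(lstm_features) == 0:
        # First processed frame: fill the whole window with the same features
        lstm_features = [features] * 10
    else:
        # Slide the window: drop the oldest vector, append the newest
        lstm_features = lstm_features[1:] + [features]

    lstm_f = torch.from_numpy(np.vstack(lstm_features))  # (10, D)
    lstm_f = torch.unsqueeze(lstm_f, 0)                  # (1, 10, D)
    return pth_model_dynamic(lstm_f).detach().numpy()

Between downsampled frames, the caller reuses last_output instead of recomputing, which is what the else branch after the window update handles.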
app/model.py
CHANGED
@@ -27,9 +27,9 @@ def load_model(model_url, model_path):
         return None


-pth_model_static = load_model(config_data.model_static_url, config_data.model_static_path)
+pth_model_static = load_model(config_data.model_static_url, config_data.model_static_path)

-pth_model_dynamic = load_model(config_data.model_dynamic_url, config_data.model_dynamic_path)
+pth_model_dynamic = load_model(config_data.model_dynamic_url, config_data.model_dynamic_path)


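The diff shows only the call sites of load_model, which evidently can return None on failure (the return None just above the assignments). A hypothetical shape of such a helper, assuming the weights are fetched over HTTP and cached at model_path; none of the body below is taken from the actual file.

import os
import torch
import requests

def load_model(model_url, model_path):
    # Hypothetical: download weights once, cache them, load for inference
    try:
        if not os.path.exists(model_path):
            with requests.get(model_url, stream=True) as r:
                r.raise_for_status()
                with open(model_path, "wb") as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
        model = torch.load(model_path, map_location="cpu")
        model.eval()  # inference only
        return model
    except Exception:
        # Mirrors the diff: callers must handle a None model
        return None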
config.toml
CHANGED
@@ -1,5 +1,4 @@
 APP_VERSION = "0.2.0"
-DEVICE = "cpu"
 FRAME_DOWNSAMPLING = 5

 [model_static]
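After this change the config no longer carries a DEVICE key, and FRAME_DOWNSAMPLING = 5 matches the (count_frame-1) % config_data.FRAME_DOWNSAMPLING == 0 check in app/app_utils.py, so features are recomputed on every fifth frame. A sketch of loading such a file into a config_data-like object, using the stdlib tomllib (Python 3.11+); the flattening of the [model_static] table into the model_static_url / model_static_path names seen in app/model.py is an assumption, and the real loader may differ.

import tomllib  # stdlib in Python 3.11+; older projects often use the toml package
from types import SimpleNamespace

with open("config.toml", "rb") as f:
    cfg = tomllib.load(f)

config_data = SimpleNamespace(
    APP_VERSION=cfg["APP_VERSION"],
    FRAME_DOWNSAMPLING=cfg["FRAME_DOWNSAMPLING"],
    # Assumed key layout inside the [model_static] table
    model_static_url=cfg["model_static"].get("url"),
    model_static_path=cfg["model_static"].get("path"),
)

# With FRAME_DOWNSAMPLING = 5, frames 1, 6, 11, ... trigger feature extraction
for count_frame in range(1, 12):
    if (count_frame - 1) % config_data.FRAME_DOWNSAMPLING == 0:
        print("extract features at frame", count_frame)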
result.mp4
CHANGED
Binary files a/result.mp4 and b/result.mp4 differ