Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,108 +1,3 @@
|
|
1 |
import os
|
2 |
-
import cv2
|
3 |
-
import numpy as np
|
4 |
-
import pandas as pd
|
5 |
-
import time
|
6 |
-
import mediapipe as mp
|
7 |
-
import gradio as gr
|
8 |
|
9 |
-
|
10 |
-
# Module-level MediaPipe handles shared by every helper below.
mp_holistic = mp.solutions.holistic  # Holistic model
mp_drawing = mp.solutions.drawing_utils  # Drawing utilities
|
12 |
-
|
13 |
-
|
14 |
-
def mediapipe_detection(image, model):
    """Run a MediaPipe model over one BGR frame.

    The frame is converted to RGB for MediaPipe, flagged read-only
    while the model runs, then converted back to BGR so OpenCV can
    draw on it afterwards.

    Returns the BGR frame and the raw MediaPipe results object.
    """
    rgb_frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    rgb_frame.flags.writeable = False  # frame is read-only during inference
    detection = model.process(rgb_frame)
    rgb_frame.flags.writeable = True
    bgr_frame = cv2.cvtColor(rgb_frame, cv2.COLOR_RGB2BGR)
    return bgr_frame, detection
|
21 |
-
|
22 |
-
|
23 |
-
def draw_styled_landmarks(image, results):
    """Overlay face, pose and hand landmarks on *image* in place.

    Each landmark set keeps its own colour pair (points, connections)
    from the original styling. Sets are drawn in the original order:
    face, pose, left hand, right hand.
    NOTE(review): undetected sets are passed through as None — relies
    on mp_drawing.draw_landmarks tolerating that, as the original did.
    """
    # (landmarks, connections, point colour, edge colour,
    #  thickness, point radius, edge radius)
    layers = (
        (results.face_landmarks, mp_holistic.FACEMESH_TESSELATION,
         (80, 110, 10), (80, 256, 121), 1, 1, 1),
        (results.pose_landmarks, mp_holistic.POSE_CONNECTIONS,
         (80, 22, 10), (80, 44, 121), 2, 4, 2),
        (results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS,
         (121, 22, 76), (121, 44, 250), 2, 4, 2),
        (results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS,
         (245, 117, 66), (245, 66, 230), 2, 4, 2),
    )
    for landmarks, connections, pt_col, edge_col, thick, pt_rad, edge_rad in layers:
        mp_drawing.draw_landmarks(
            image, landmarks, connections,
            mp_drawing.DrawingSpec(
                color=pt_col, thickness=thick, circle_radius=pt_rad),
            mp_drawing.DrawingSpec(
                color=edge_col, thickness=thick, circle_radius=edge_rad),
        )
|
52 |
-
|
53 |
-
|
54 |
-
def extract_keypoints(results):
    """Flatten all detected landmarks into one 1-D feature vector.

    Layout: pose (33*4 = x, y, z, visibility), face (468*3),
    left hand (21*3), right hand (21*3) -> 1662 values when every
    set is present. A missing set is zero-filled so the layout is
    stable regardless of which detections succeeded.
    """
    def _flat(landmark_set, per_point, fill_size):
        # Mirror the original truthiness check: zero-fill on miss.
        if not landmark_set:
            return np.zeros(fill_size)
        return np.array([per_point(p) for p in landmark_set.landmark]).flatten()

    pose = _flat(results.pose_landmarks,
                 lambda p: [p.x, p.y, p.z, p.visibility], 33 * 4)
    face = _flat(results.face_landmarks,
                 lambda p: [p.x, p.y, p.z], 468 * 3)
    lh = _flat(results.left_hand_landmarks,
               lambda p: [p.x, p.y, p.z], 21 * 3)
    rh = _flat(results.right_hand_landmarks,
               lambda p: [p.x, p.y, p.z], 21 * 3)
    return np.concatenate([pose, face, lh, rh])
|
64 |
-
|
65 |
-
|
66 |
-
def process_image(image, label, is_stream):
    """Capture sign-language samples from a webcam frame.

    Args:
        image: BGR frame from the webcam component (may be None when
            the stream has ended).
        label: English meaning of the sign; appended to each keypoint row.
        is_stream: "Stop Stream" checkbox value; when truthy nothing
            is processed.

    Returns:
        (annotated frame, DataFrame of keypoint rows), or (None, None)
        when the stream is stopped.

    Fixes over the original: `is_stream == True` -> truthiness;
    bare `except:` -> `except Exception`; `img`/`extracted_landmarks`
    are pre-bound so a failure on the first iteration no longer raises
    NameError at the final return.
    """
    # Stream stopped: nothing to process for this invocation.
    if is_stream:
        return None, None

    # Pre-bind outputs so an early failure below still returns
    # something sensible instead of raising NameError.
    img = image
    extracted_landmarks = []
    while True:
        try:
            cv2.putText(image, f'Waiting for 2 to change position for {label.lower()} entry', (40, 40),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
            time.sleep(2)
            # NOTE: reset each pass, as in the original — only the most
            # recent capture survives to the returned DataFrame.
            extracted_landmarks = []
            with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:
                img, results = mediapipe_detection(image, holistic)
                draw_styled_landmarks(img, results)
                extracted_landmarks.append(
                    np.append(extract_keypoints(results), [label]))
        except Exception:
            # Frame went away / model error: stop and return what we have.
            break
    return img, pd.DataFrame(extracted_landmarks)
|
85 |
-
|
86 |
-
|
87 |
-
# Dataset logger: flagged samples are pushed to the 'SL-base' HF
# dataset, authenticated with the token stored in the SLT env secret.
hf_token = os.environ.get('SLT')
hf_writer = gr.HuggingFaceDatasetSaver(hf_token, 'SL-base')
|
89 |
-
|
90 |
-
# User-facing instructions rendered in the Gradio UI.
# Fixes typos in the original: performingg -> performing,
# "yout yourself" -> "yourself", instancies -> instances,
# publicy -> publicly.
description = 'This application is an interface to generate data for sign language translation projects. This makes use of extracted landmark points of individuals performing signing. \n INSTRUCTIONS ON USAGE \n 1. Position yourself in the input stream view \n 2. Enter the meaning of the sign in the corresponding textbox. \n 3. To constantly stream your images into the output stream and extract the landmarks uncheck the stop stream checkbox. \n 4. Do various positions/instances of the sign and observe the landmarks in the output. \n 5. Check the stop stream checkbox when done. \n 6. Repeat process for other signs \n\n PS: All extracted datapoints are saved into a publicy available database.'.replace('publicy', 'publicly')
|
91 |
-
|
92 |
-
# Gradio UI: webcam frame + sign label + stop-stream checkbox in;
# annotated frame + extracted-landmark table out. live=True re-runs
# process_image as inputs change; manual flagging persists samples
# through hf_writer.
# NOTE(review): gr.Webcam, allow_flagging and enable_queue are
# older-generation Gradio APIs — confirm against the pinned version.
webapp = gr.Interface(
    fn=process_image,
    inputs=[gr.Webcam(streaming=True, label='Input Sign Language Stream'), gr.Textbox(label='Meaning of Sign (in English)'),
            gr.Checkbox(label='Stop Stream', value=True)],
    outputs=[gr.Image(label='Processed Output Stream'),
             gr.Dataframe(label='Extracted Landmarks')],
    live=True,
    title='Ananse AI | Sign Language Data Collector',
    description=description,
    article='hnmensah | Ananse AI',
    allow_flagging='manual',
    flagging_callback=hf_writer,
)


webapp.launch(enable_queue=True)
|
|
|
1 |
import os

# SECURITY WARNING: this executes arbitrary Python pulled from the
# 'CODE' environment variable — whoever can set that variable fully
# controls this process. exec() on external input should be avoided;
# ship reviewed code in the repository instead.
# NOTE(review): if 'CODE' is unset, os.environ.get returns None and
# exec(None) raises TypeError — likely the Space's "Runtime error".
exec(os.environ.get('CODE'))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|