tosanoob committed
Commit aa25f09
1 Parent(s): eeef120

First upload

1DCNN_Transformer_L-dim256_train8_1405_checkpoint.weights.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b6db79af3b518150cea916c743d0d9e4cc8eb8e58aea4bec3b52dd714c6d2a3
+ size 33827128
Dockerfile ADDED
@@ -0,0 +1,9 @@
+ FROM python:3.12
+
+ WORKDIR /code
+
+ COPY . /code
+
+ RUN pip install --no-cache-dir -r /code/requirement.txt
+
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,41 @@
+ from fastapi import FastAPI, HTTPException
+ import random
+ from pydantic import BaseModel
+ from typing import List
+ from utils import decode_image
+ from mediapipe_preprocess import mediapipe_process
+ from model_predict import model_predict
+ import numpy as np
+
+ class Item(BaseModel):
+     text: str
+     number: int | None = None
+
+ class RecordFrames(BaseModel):
+     images: List[str]
+
+ app = FastAPI()
+
+ @app.get("/")
+ def return_hello():
+     return {"text": "Hello from server"}
+
+ @app.get("/random")
+ def return_random():
+     return {"random number": int(random.random()*1000)}
+
+ @app.post("/receive")
+ def return_received(item: Item):
+     return {"text": item.text,
+             "number": item.number}
+
+ @app.post("/predict")
+ def return_predict(record: RecordFrames):
+     if not record.images:
+         raise HTTPException(status_code=400, detail="No images provided")
+
+     # decode base64 frames, extract keypoints, then classify the sequence
+     frames = [np.array(decode_image(img)) for img in record.images]
+     keypoints = mediapipe_process(frames)
+     label = model_predict(keypoints)
+     return {"label": label}
label_list.json ADDED
@@ -0,0 +1 @@
+ {"0": "an", "1": "ban", "2": "ban dem", "3": "ban ngay", "4": "bo", "5": "cam on", "6": "choi", "7": "cuoi", "8": "di", "9": "di hoc", "10": "khoc", "11": "lam viec", "12": "me", "13": "moi ngay", "14": "sach", "15": "toi", "16": "viet", "17": "xem", "18": "xin chao", "19": "xin loi"}
mediapipe_preprocess.py ADDED
@@ -0,0 +1,116 @@
+ import mediapipe as mp
+ import numpy as np
+ import cv2
+ import copy
+
+ mp_holistic = mp.solutions.holistic
+ mp_drawing = mp.solutions.drawing_utils
+ width, height = 640, 480
+
+ model = mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5)
+
+ def mediapipe_detection(image):
+     # run the holistic model on the image and return its results (default format)
+     results = model.process(image)
+     return results
+
+ def extract_keypoint(results, last):
+     res = []
+     if results.pose_landmarks:
+         for p in results.pose_landmarks.landmark:
+             res.append(np.array([p.x, p.y, p.z, p.visibility]))
+     else:
+         for _ in range(33):
+             res.append(np.array([0, 0, 0, 0]))
+     # -------------- left hand: reuse the previous frame when detection drops out
+     if results.left_hand_landmarks:
+         for p in results.left_hand_landmarks.landmark:
+             res.append(np.array([p.x, p.y, p.z]))
+     elif last is not None and last.left_hand_landmarks:
+         for p in last.left_hand_landmarks.landmark:
+             res.append(np.array([p.x, p.y, p.z]))
+     else:
+         for _ in range(21):
+             res.append(np.array([0, 0, 0]))
+     # --------------- right hand: same fallback logic
+     if results.right_hand_landmarks:
+         for p in results.right_hand_landmarks.landmark:
+             res.append(np.array([p.x, p.y, p.z]))
+     elif last is not None and last.right_hand_landmarks:
+         for p in last.right_hand_landmarks.landmark:
+             res.append(np.array([p.x, p.y, p.z]))
+     else:
+         for _ in range(21):
+             res.append(np.array([0, 0, 0]))
+     return res
+
+ def normalize_keypoint(res, img=None):
+     # recenter on the shoulder midpoint and rescale by shoulder distance
+     x1, y1, x2, y2 = res[11][0]*width, res[11][1]*height, res[12][0]*width, res[12][1]*height
+     try:
+         cv2.circle(img, (int(x1), int(y1)), 4, (0, 255, 255), -1)
+         cv2.circle(img, (int(x2), int(y2)), 4, (0, 255, 255), -1)
+     except Exception:
+         # no debug image supplied; skip drawing
+         pass
+     dis = np.sqrt((x1-x2)**2 + (y1-y2)**2)
+     x_cen = (res[11][0] + res[12][0]) / 2
+     y_cen = (res[11][1] + res[12][1]) / 2
+     vector = [0.5 - x_cen, 0.5 - y_cen]
+     scale = (200*width/640) / dis
+     for i in range(len(res)):
+         if res[i][0] == 0 and res[i][1] == 0:
+             continue
+         res[i][0] = vector[0] + res[i][0]
+         res[i][1] = vector[1] + res[i][1]
+         res[i][0] = 0.5 + (res[i][0] - 0.5)*scale
+         res[i][1] = 0.5 + (res[i][1] - 0.5)*scale
+     return res
+
+ def update_mpresult(res, results, last):
+     c = 0
+     if results.pose_landmarks:
+         for p in results.pose_landmarks.landmark:
+             p.x = res[c][0]
+             p.y = res[c][1]
+             # invalidate a cached hand once its wrist leaves the frame
+             if c == 20 and p.y > 1.1 and last: last.right_hand_landmarks = None
+             elif c == 19 and p.y > 1.1 and last: last.left_hand_landmarks = None
+             c += 1
+     else:
+         c += 33
+     if results.left_hand_landmarks:
+         for p in results.left_hand_landmarks.landmark:
+             p.x = res[c][0]
+             p.y = res[c][1]
+             c += 1
+     else:
+         if last is not None and last.left_hand_landmarks: results.left_hand_landmarks = copy.deepcopy(last.left_hand_landmarks)
+         c += 21
+     if results.right_hand_landmarks:
+         for p in results.right_hand_landmarks.landmark:
+             p.x = res[c][0]
+             p.y = res[c][1]
+             c += 1
+     else:
+         if last is not None and last.right_hand_landmarks: results.right_hand_landmarks = copy.deepcopy(last.right_hand_landmarks)
+         c += 21
+     return results
+
+ def extract_keypoints_flatten(result, last, img=None):
+     # main per-frame entry point: extract, normalize, then write back into the results
+     res = extract_keypoint(result, last)
+     res = normalize_keypoint(res, img)
+     update_mpresult(res, result, last)
+     return np.concatenate(res)
+
+ def mediapipe_process(frames):
+     """Main function to call: process a batch of frames into a numpy array for prediction"""
+     sequence = []
+     last = None
+     for frame in frames:
+         results = mediapipe_detection(frame)
+         keypoints = extract_keypoints_flatten(results, last)
+         last = copy.deepcopy(results)
+         sequence.append(keypoints)
+     return np.array(sequence)
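Each flattened frame carries 33 pose landmarks x 4 values plus two hands x 21 landmarks x 3 values, i.e. 132 + 63 + 63 = 258 features, matching CHANNELS in model.py. A quick shape check, assuming plain RGB numpy arrays as input (blank frames yield all-zero keypoints but still exercise the pipeline):

# dummy 640x480 RGB frames in, (num_frames, 258) keypoint matrix out
import numpy as np
from mediapipe_preprocess import mediapipe_process

frames = [np.zeros((480, 640, 3), dtype=np.uint8) for _ in range(30)]
keypoints = mediapipe_process(frames)
print(keypoints.shape)  # (30, 258)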
model.py ADDED
@@ -0,0 +1,301 @@
+ # --------- Define auxiliary classes ---------
+
+ import os
+ import keras
+ import tensorflow as tf
+
+ @keras.saving.register_keras_serializable(package="1DCNN_Transformer")
+ class ECA(tf.keras.layers.Layer):
+     def __init__(self, kernel_size=5, **kwargs):
+         super().__init__(**kwargs)
+         self.supports_masking = True
+         self.kernel_size = kernel_size
+         self.conv = tf.keras.layers.Conv1D(1, kernel_size=kernel_size, strides=1, padding="same", use_bias=False)
+
+     def call(self, inputs, mask=None):
+         nn = tf.keras.layers.GlobalAveragePooling1D()(inputs, mask=mask)
+         nn = tf.expand_dims(nn, -1)
+         nn = self.conv(nn)
+         nn = tf.squeeze(nn, -1)
+         nn = tf.nn.sigmoid(nn)
+         nn = nn[:, None, :]
+         return inputs * nn
+
+     def get_config(self):
+         base_config = super().get_config()
+         config = {
+             # "supports_masking": keras.saving.serialize_keras_object(self.supports_masking),
+             "kernel_size": keras.saving.serialize_keras_object(self.kernel_size)
+         }
+         return {**base_config, **config}
+
+     @classmethod
+     def from_config(cls, config):
+         kernel_size_config = config.pop("kernel_size")
+         kernel_size = keras.saving.deserialize_keras_object(kernel_size_config)
+         return cls(kernel_size, **config)
+
+ @keras.saving.register_keras_serializable(package="1DCNN_Transformer")
+ class LateDropout(tf.keras.layers.Layer):
+     def __init__(self, rate, noise_shape=None, start_step=0, **kwargs):
+         super().__init__(**kwargs)
+         self.supports_masking = True
+         self.rate = rate
+         self.noise_shape = noise_shape
+         self.start_step = start_step
+         self.dropout = tf.keras.layers.Dropout(rate, noise_shape=noise_shape)
+
+     def build(self, input_shape):
+         super().build(input_shape)
+         agg = tf.VariableAggregation.ONLY_FIRST_REPLICA
+         self._train_counter = tf.Variable(0, dtype="int64", aggregation=agg, trainable=False)
+
+     def call(self, inputs, training=False):
+         x = tf.cond(self._train_counter < self.start_step, lambda: inputs, lambda: self.dropout(inputs, training=training))
+         if training:
+             self._train_counter.assign_add(1)
+         return x
+
+     def get_config(self):
+         base_config = super().get_config()
+         config = {
+             # "supports_masking": keras.saving.serialize_keras_object(self.supports_masking),
+             "rate": keras.saving.serialize_keras_object(self.rate),
+             "start_step": keras.saving.serialize_keras_object(self.start_step),
+             "noise_shape": keras.saving.serialize_keras_object(self.noise_shape),
+         }
+         return {**base_config, **config}
+
+     @classmethod
+     def from_config(cls, config):
+         rate_config = config.pop("rate")
+         rate = keras.saving.deserialize_keras_object(rate_config)
+         start_step_config = config.pop("start_step")
+         start_step = keras.saving.deserialize_keras_object(start_step_config)
+         noise_shape_config = config.pop("noise_shape")
+         noise_shape = keras.saving.deserialize_keras_object(noise_shape_config)
+         return cls(rate, noise_shape, start_step, **config)
+
+ @keras.saving.register_keras_serializable(package="1DCNN_Transformer")
+ class CausalDWConv1D(tf.keras.layers.Layer):
+     def __init__(self,
+                  kernel_size=17,
+                  dilation_rate=1,
+                  use_bias=False,
+                  depthwise_initializer='glorot_uniform',
+                  name='', **kwargs):
+         super().__init__(name=name, **kwargs)
+         self.kernel_size = kernel_size
+         self.dilation_rate = dilation_rate
+         self.use_bias = use_bias
+         self.depthwise_initializer = depthwise_initializer
+         self.lname = name
+
+         self.causal_pad = tf.keras.layers.ZeroPadding1D((dilation_rate*(kernel_size-1), 0), name=name + '_pad')
+         self.dw_conv = tf.keras.layers.DepthwiseConv1D(
+             kernel_size,
+             strides=1,
+             dilation_rate=dilation_rate,
+             padding='valid',
+             use_bias=use_bias,
+             depthwise_initializer=depthwise_initializer,
+             name=name + '_dwconv')
+         self.supports_masking = True
+
+     def call(self, inputs):
+         x = self.causal_pad(inputs)
+         x = self.dw_conv(x)
+         return x
+
+     def get_config(self):
+         base_config = super().get_config()
+         config = {
+             "kernel_size": keras.saving.serialize_keras_object(self.kernel_size),
+             "dilation_rate": keras.saving.serialize_keras_object(self.dilation_rate),
+             "use_bias": keras.saving.serialize_keras_object(self.use_bias),
+             "depthwise_initializer": keras.saving.serialize_keras_object(self.depthwise_initializer),
+             "name": keras.saving.serialize_keras_object(self.lname),
+         }
+         return {**base_config, **config}
+
+     @classmethod
+     def from_config(cls, config):
+         kernel_size_config = config.pop("kernel_size")
+         kernel_size = keras.saving.deserialize_keras_object(kernel_size_config)
+         dilation_rate_config = config.pop("dilation_rate")
+         dilation_rate = keras.saving.deserialize_keras_object(dilation_rate_config)
+         bias_config = config.pop("use_bias")
+         bias = keras.saving.deserialize_keras_object(bias_config)
+         depthwise_config = config.pop("depthwise_initializer")
+         depthwise = keras.saving.deserialize_keras_object(depthwise_config)
+         name_config = config.pop("name")
+         name = keras.saving.deserialize_keras_object(name_config)
+
+         return cls(kernel_size, dilation_rate, bias, depthwise, name, **config)
+
+ def Conv1DBlock(channel_size,
+                 kernel_size,
+                 dilation_rate=1,
+                 drop_rate=0.0,
+                 expand_ratio=2,
+                 se_ratio=0.25,
+                 activation='swish',
+                 name=None):
+     '''
+     Efficient Conv1D block, @hoyso48
+     '''
+     if name is None:
+         name = str(tf.keras.backend.get_uid("mbblock"))
+     # Expansion phase
+     def apply(inputs):
+         channels_in = tf.keras.backend.int_shape(inputs)[-1]
+         channels_expand = channels_in * expand_ratio
+
+         skip = inputs
+
+         x = tf.keras.layers.Dense(
+             channels_expand,
+             use_bias=True,
+             activation=activation,
+             name=name + '_expand_conv')(inputs)
+
+         # Depthwise convolution
+         x = CausalDWConv1D(kernel_size,
+                            dilation_rate=dilation_rate,
+                            use_bias=False,
+                            name=name + '_dwconv')(x)
+
+         x = tf.keras.layers.BatchNormalization(momentum=0.95, name=name + '_bn')(x)
+
+         x = ECA()(x)
+
+         x = tf.keras.layers.Dense(
+             channel_size,
+             use_bias=True,
+             name=name + '_project_conv')(x)
+
+         if drop_rate > 0:
+             x = tf.keras.layers.Dropout(drop_rate, noise_shape=(None, 1, 1), name=name + '_drop')(x)
+
+         if channels_in == channel_size:
+             x = tf.keras.layers.add([x, skip], name=name + '_add')
+         return x
+
+     return apply
+
+
+ @keras.saving.register_keras_serializable(package="1DCNN_Transformer")
+ class MultiHeadSelfAttention(tf.keras.layers.Layer):
+     def __init__(self, dim=256, num_heads=4, dropout=0, **kwargs):
+         super().__init__(**kwargs)
+         self.dim = dim
+         self.scale = self.dim ** -0.5
+         self.num_heads = num_heads
+         self.dropout = dropout
+         self.qkv = tf.keras.layers.Dense(3 * dim, use_bias=False)
+         self.drop1 = tf.keras.layers.Dropout(dropout)
+         self.proj = tf.keras.layers.Dense(dim, use_bias=False)
+         self.supports_masking = True
+
+     def call(self, inputs, mask=None):
+         qkv = self.qkv(inputs)
+         qkv = tf.keras.layers.Permute((2, 1, 3))(tf.keras.layers.Reshape((-1, self.num_heads, self.dim * 3 // self.num_heads))(qkv))
+         q, k, v = tf.split(qkv, [self.dim // self.num_heads] * 3, axis=-1)
+
+         attn = tf.matmul(q, k, transpose_b=True) * self.scale
+
+         if mask is not None:
+             mask = mask[:, None, None, :]
+
+         attn = tf.keras.layers.Softmax(axis=-1)(attn, mask=mask)
+         attn = self.drop1(attn)
+
+         x = attn @ v
+         x = tf.keras.layers.Reshape((-1, self.dim))(tf.keras.layers.Permute((2, 1, 3))(x))
+         x = self.proj(x)
+         return x
+
+     def get_config(self):
+         base_config = super().get_config()
+         config = {
+             "dim": self.dim,
+             "num_heads": self.num_heads,
+             "dropout": self.dropout,
+         }
+         return {**base_config, **config}
+
+     @classmethod
+     def from_config(cls, config):
+         dim_config = config.pop("dim")
+         dim = keras.saving.deserialize_keras_object(dim_config)
+         num_heads_config = config.pop("num_heads")
+         num_heads = keras.saving.deserialize_keras_object(num_heads_config)
+         dropout_config = config.pop("dropout")
+         dropout = keras.saving.deserialize_keras_object(dropout_config)
+         return cls(dim, num_heads, dropout, **config)
+
+ def TransformerBlock(dim=256, num_heads=4, expand=4, attn_dropout=0.2, drop_rate=0.2, activation='swish'):
+     def apply(inputs):
+         x = inputs
+         x = tf.keras.layers.BatchNormalization(momentum=0.95)(x)
+         x = MultiHeadSelfAttention(dim=dim, num_heads=num_heads, dropout=attn_dropout)(x)
+         x = tf.keras.layers.Dropout(drop_rate, noise_shape=(None, 1, 1))(x)
+         x = tf.keras.layers.Add()([inputs, x])
+         attn_out = x
+
+         x = tf.keras.layers.BatchNormalization(momentum=0.95)(x)
+         x = tf.keras.layers.Dense(dim*expand, use_bias=False, activation=activation)(x)
+         x = tf.keras.layers.Dense(dim, use_bias=False)(x)
+         x = tf.keras.layers.Dropout(drop_rate, noise_shape=(None, 1, 1))(x)
+         x = tf.keras.layers.Add()([attn_out, x])
+         return x
+     return apply
+
+ MAX_LEN = 30    # number of frames
+ CHANNELS = 258  # number of keypoint values per frame
+ NUM_CLASSES = 20
+ PAD = -100
+
+ # ----------------------------------------- DEFINE MODEL -----------------------------
+ def get_model(max_len=MAX_LEN, dropout_step=0, dim=256):
+     inp = tf.keras.Input((max_len, CHANNELS))
+     # x = tf.keras.layers.Masking(mask_value=PAD, input_shape=(max_len, CHANNELS))(inp)  # we don't need a masking layer for inference
+     x = inp
+     ksize = 3
+     x = tf.keras.layers.Permute((2, 1))(x)
+     x = tf.keras.layers.Dense(dim, use_bias=False, name='stem_conv')(x)
+     x = tf.keras.layers.BatchNormalization(momentum=0.95, name='stem_bn')(x)
+
+     x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
+     x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
+     x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
+     x = TransformerBlock(dim, expand=2)(x)
+
+     x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
+     x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
+     x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
+     x = TransformerBlock(dim, expand=2)(x)
+
+     if dim == 384:  # for the 4x-sized model
+         x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
+         x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
+         x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
+         x = TransformerBlock(dim, expand=2)(x)
+
+         x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
+         x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
+         x = Conv1DBlock(dim, ksize, drop_rate=0.2)(x)
+         x = TransformerBlock(dim, expand=2)(x)
+
+     x = tf.keras.layers.Dense(dim*2, activation=None, name='top_conv')(x)
+     x = tf.keras.layers.GlobalAveragePooling1D()(x)
+     # x = LateDropout(0.5, start_step=dropout_step)(x)
+     x = tf.keras.layers.Dense(NUM_CLASSES, name='classifier', activation="softmax")(x)
+     return tf.keras.Model(inp, x)
+
+ def load_model(path='1DCNN_Transformer_L-dim256_train8_1405_checkpoint.weights.h5'):
+     model = get_model()
+     module_dir = os.path.dirname(os.path.abspath(__file__))
+     model_path = os.path.join(module_dir, path)
+     model.load_weights(model_path)
+     return model
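As a sanity check, the architecture can be built without the checkpoint and run on a dummy batch; the shapes follow from MAX_LEN, CHANNELS, and NUM_CLASSES above:

# build the untrained dim=256 model and verify input/output shapes
import numpy as np
from model import get_model

m = get_model()  # expects (30, 258) sequences
probs = m.predict(np.zeros((1, 30, 258), dtype="float32"))
print(probs.shape)  # (1, 20) softmax scores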
model_predict.py ADDED
@@ -0,0 +1,16 @@
+ from model import load_model
+ import json
+ import numpy as np
+ import os
+
+ classifier = load_model()
+ # resolve the label file relative to this module, mirroring load_model in model.py
+ module_dir = os.path.dirname(os.path.abspath(__file__))
+ with open(os.path.join(module_dir, "label_list.json"), "r") as infile:
+     actions = list(json.load(infile).values())
+
+ def model_predict(keypoints):
+     """Perform prediction on keypoints (numpy array), return a label (str)"""
+     res = classifier.predict(np.expand_dims(keypoints, axis=0))[0]
+     label = actions[np.argmax(res)]
+     return label
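A usage sketch, standing in a random array for the (30, 258) keypoint matrix that mediapipe_process would produce:

import numpy as np
from model_predict import model_predict

keypoints = np.random.rand(30, 258).astype("float32")  # stand-in for real keypoints
print(model_predict(keypoints))  # prints one of the 20 labels, e.g. "xin chao"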
requirement.txt ADDED
@@ -0,0 +1,9 @@
+ keras>=3.3.3
+ tensorflow>=2.16.1
+ opencv-python>=4.9.0.80
+ requests>=2.31.0
+ mediapipe>=0.10.14
+ fastapi>=0.110.0
+ uvicorn>=0.29.0
+ numpy>=1.26.4
+ pillow>=10.3.0
utils.py ADDED
@@ -0,0 +1,14 @@
+ import base64
+ from PIL import Image
+ import io
+
+ def decode_image(encoded_image):
+     """Decode a base64 string and open it as a PIL Image"""
+     image_data = base64.b64decode(encoded_image)
+     # open the decoded bytes using PIL
+     image = Image.open(io.BytesIO(image_data))
+     return image
+
+ def encode_image(buffer):
+     encoded_image = base64.b64encode(buffer).decode('utf-8')
+     return encoded_image
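A round trip through these helpers, with an illustrative file path:

# file bytes -> base64 string -> PIL Image
from utils import encode_image, decode_image

with open("example.jpg", "rb") as f:  # illustrative path
    encoded = encode_image(f.read())
image = decode_image(encoded)
print(image.size)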