import pickle

import gradio as gr
import mediapipe as mp
import numpy as np
import pandas as pd

from landmarks import landmarks  # column names matching the features the classifier was trained on

# MediaPipe Pose setup: draws the skeleton overlay and extracts 33 body landmarks per frame.
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose
pose = mp_pose.Pose(min_tracking_confidence=0.5, min_detection_confidence=0.5)

# Pre-trained classifier that labels a pose as the "up" or "down" phase of a deadlift.
with open('deadlift.pkl', 'rb') as f:
    model = pickle.load(f)

# State shared across frames: rep counter and the last confident stage/prediction.
current_stage = ''
counter = 0
bodylang_prob = np.array([0, 0])
bodylang_class = ''


def detect(frame):
    """Run pose estimation and the deadlift classifier on a single RGB frame."""
    global current_stage
    global counter
    global bodylang_class
    global bodylang_prob

    # Gradio supplies frames as RGB numpy arrays, which is what MediaPipe expects,
    # so no colour conversion is needed; copy so the caller's array is not modified in place.
    image = frame.copy()
    results = pose.process(image)
    mp_drawing.draw_landmarks(
        image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
        mp_drawing.DrawingSpec(color=(106, 13, 173), thickness=4, circle_radius=5),
        mp_drawing.DrawingSpec(color=(255, 102, 0), thickness=5, circle_radius=10))

    try:
        # Flatten the landmarks into a single feature row and classify the lift phase.
        row = np.array(
            [[res.x, res.y, res.z, res.visibility]
             for res in results.pose_landmarks.landmark]).flatten().tolist()
        X = pd.DataFrame([row], columns=landmarks)
        bodylang_prob = model.predict_proba(X)[0]
        bodylang_class = model.predict(X)[0]

        # Count a rep on each confident "down" -> "up" transition.
        if bodylang_class == "down" and bodylang_prob[bodylang_prob.argmax()] > 0.7:
            current_stage = "down"
        elif current_stage == "down" and bodylang_class == "up" \
                and bodylang_prob[bodylang_prob.argmax()] > 0.7:
            current_stage = "up"
            counter += 1
    except Exception as e:
        # No landmarks detected (or a prediction error): return the frame without annotations.
        print(e)

    return image


def snap(image, video):
    # Run detection on the webcam snapshot; the recorded video is passed through unchanged
    # (gr.Video hands the function a file path, not an array of frames, so it is not processed here).
    if image is None:
        return None, video
    return detect(image), video


demo = gr.Interface(
    snap,
    [
        gr.Image(sources=["webcam"], label="Image"),
        gr.Video(sources=["webcam"], label="Video"),
    ],
    [
        gr.Image(label="Processed Image"),
        gr.Video(label="Processed Video"),
    ],
    title="Deadlift Tracker",
)

if __name__ == "__main__":
    demo.launch()