# Live person-detection demo: a Hugging Face object-detection pipeline draws
# bounding boxes on webcam frames served through a Gradio interface.
import cv2
import gradio as gr
from PIL import Image
from transformers import pipeline

# The model names below are placeholders. YOLOv8 checkpoints are generally not
# loadable through the transformers pipeline API; substitute an object-detection
# checkpoint from the Hugging Face Hub (e.g. "facebook/detr-resnet-50") and your
# own fine-tuned text classifier.
pose_detection = pipeline("object-detection", model="yolov8-pose")

# Loaded for later use; this classifier is not called in this snippet.
suspicious_activity_detection = pipeline("text-classification", model="suspicious_activity_model")


def process_frame(frame):
    # The transformers image pipelines accept PIL images (or paths/URLs), so
    # convert the incoming numpy frame before running detection.
    results = pose_detection(Image.fromarray(frame))

    for person in results:
        if person['label'] == 'person':
            # Read the coordinates by key instead of relying on dict ordering.
            box = person['box']
            x1, y1, x2, y2 = int(box['xmin']), int(box['ymin']), int(box['xmax']), int(box['ymax'])
            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.putText(frame, 'Detected', (x1, y1 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

    return frame


def live_detection(frame):
    # Gradio delivers webcam frames as RGB numpy arrays, so no BGR-to-RGB
    # conversion is needed before drawing or returning.
    processed_frame = process_frame(frame)
    return processed_frame


interface = gr.Interface(
    fn=live_detection,
    # Gradio 3.x syntax; on Gradio 4+ this becomes gr.Image(sources=["webcam"], type="numpy").
    inputs=gr.Image(source="webcam", tool="editor", type="numpy"),
    outputs=gr.Image(type="numpy"),
    live=True
)


if __name__ == "__main__":
    interface.launch(server_name="0.0.0.0", server_port=7860)
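# Optional sanity check: a minimal sketch (not part of the app above) that runs
# process_frame on a single local image instead of the live webcam feed. The file
# name "sample.jpg" is a hypothetical placeholder.
#
#   import numpy as np
#   test_frame = np.array(Image.open("sample.jpg").convert("RGB"))
#   annotated = process_frame(test_frame)
#   # cv2.imwrite expects BGR ordering, so convert back before saving.
#   cv2.imwrite("annotated.jpg", cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))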