Update app.py
app.py CHANGED
@@ -1,128 +1,20 @@
-import cv2
-import torch
-import numpy as np
 import gradio as gr
 from ultralytics import YOLO
-from deep_sort_realtime.deep_sort import DeepSort
 
-
-
-"""
-Initialize object tracker with YOLO and DeepSort
-"""
-# Load YOLO model for person detection
-self.model = YOLO(person_model_path)
-
-# Initialize DeepSort tracker
-self.tracker = DeepSort(
-    max_age=30,  # Tracks can be lost for up to 30 frames
-    n_init=3,    # Number of consecutive detections before track is confirmed
-)
-
-# Tracking statistics
-self.person_count = 0
-self.tracking_data = {}
+# Load YOLO model
+model = YOLO("Suspicious_Activities_nano.pt")
 
-
-
-
-
-# Detect persons using YOLO
-results = self.model(frame, classes=[0], conf=0.5)
-
-# Extract bounding boxes and confidences
-detections = []
-for r in results:
-    boxes = r.boxes
-    for box in boxes:
-        # Convert to [x, y, w, h] format for DeepSort
-        x1, y1, x2, y2 = box.xyxy[0]
-        bbox = [x1.item(), y1.item(), (x2 - x1).item(), (y2 - y1).item()]
-        conf = box.conf.item()
-        detections.append((bbox, conf))
-
-# Update tracks
-if detections:
-    tracks = self.tracker.update_tracks(
-        detections,
-        frame=frame
-    )
-
-# Annotate frame with tracking information
-for track in tracks:
-    if not track.is_confirmed():
-        continue
-
-    track_id = track.track_id
-    ltrb = track.to_ltrb()
-
-    # Draw bounding box
-    cv2.rectangle(
-        frame,
-        (int(ltrb[0]), int(ltrb[1])),
-        (int(ltrb[2]), int(ltrb[3])),
-        (0, 255, 0),
-        2
-    )
-
-    # Add track ID
-    cv2.putText(
-        frame,
-        f'ID: {track_id}',
-        (int(ltrb[0]), int(ltrb[1] - 10)),
-        cv2.FONT_HERSHEY_SIMPLEX,
-        0.9,
-        (0, 255, 0),
-        2
-    )
-
-return frame
-
-def process_video(input_video):
-    """
-    Main video processing function for Gradio
-    """
-    # Initialize tracker
-    tracker = ObjectTracker()
-
-    # Open input video
-    cap = cv2.VideoCapture(input_video)
-
-    # Prepare output video writer
-    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-    fps = cap.get(cv2.CAP_PROP_FPS)
-
-    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-    out = cv2.VideoWriter('output_tracked.mp4', fourcc, fps, (width, height))
-
-    # Process video frames
-    while cap.isOpened():
-        ret, frame = cap.read()
-        if not ret:
-            break
-
-        # Process and annotate frame
-        processed_frame = tracker.process_frame(frame)
-
-        # Write processed frame
-        out.write(processed_frame)
-
-    # Release resources
-    cap.release()
-    out.release()
-
-    return 'output_tracked.mp4'
+# Define a function to predict and return the result
+def predict_suspicious_activity(image):
+    # Run inference; ultralytics predict() returns a list of Results objects
+    results = model.predict(source=image, conf=0.6)
+    boxes = results[0].boxes
+    # Serialize each detection: class name, confidence, and xywh box
+    return [
+        {"class": results[0].names[int(c)], "confidence": float(cf), "xywh": b.tolist()}
+        for c, cf, b in zip(boxes.cls, boxes.conf, boxes.xywh)
+    ]
 
 # Create Gradio interface
-
-fn=
-inputs=gr.
-outputs=
-title="Person Tracking with YOLO and DeepSort",
-description="Upload a video to track and annotate person movements"
+interface = gr.Interface(
+    fn=predict_suspicious_activity,
+    inputs=gr.Image(type="pil"),  # Input component for uploading images
+    outputs="json"  # Detections rendered as JSON
 )
 
 # Launch the interface
-
-iface.launch()
+interface.launch()
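A minimal local smoke test for the new prediction path, sketched under assumptions: the Suspicious_Activities_nano.pt weights file sits in the working directory (as in app.py), and test.jpg is a placeholder image path. It runs the same inference and serialization the Gradio handler performs, without launching the interface.

# Hypothetical smoke test; weights file comes from app.py, image path is a placeholder.
from PIL import Image
from ultralytics import YOLO

model = YOLO("Suspicious_Activities_nano.pt")

# Run inference at the same confidence threshold the app uses
results = model.predict(source=Image.open("test.jpg"), conf=0.6)
boxes = results[0].boxes

# Print each detection the way the Gradio handler would serialize it
for c, cf, b in zip(boxes.cls, boxes.conf, boxes.xywh):
    print(f"{results[0].names[int(c)]}: {float(cf):.2f} at {b.tolist()}")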