Upload 17 files
- .gitattributes +3 -0
- app.py +110 -0
- assets/Objdetectionyoutubegif-1.m4v +0 -0
- assets/pic1.png +0 -0
- assets/pic3.png +0 -0
- assets/segmentation.png +0 -0
- helper.py +239 -0
- images/office_4.jpg +0 -0
- images/office_4_detected.jpg +0 -0
- packages.txt +3 -0
- requirements.txt +83 -0
- settings.py +46 -0
- videos/video_1.mp4 +3 -0
- videos/video_2.mp4 +3 -0
- videos/video_3.mp4 +3 -0
- weights/yolov8n-cls.pt +3 -0
- weights/yolov8n-seg.pt +3 -0
- weights/yolov8n.pt +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+videos/video_1.mp4 filter=lfs diff=lfs merge=lfs -text
+videos/video_2.mp4 filter=lfs diff=lfs merge=lfs -text
+videos/video_3.mp4 filter=lfs diff=lfs merge=lfs -text
app.py
ADDED
@@ -0,0 +1,110 @@
# Python In-built packages
from pathlib import Path
import PIL

# External packages
import streamlit as st

# Local Modules
import settings
import helper

# Setting page layout
st.set_page_config(
    page_title="Object Detection using YOLOv8",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Main page heading
st.title("Object Detection And Tracking using YOLOv8")

# Sidebar
st.sidebar.header("ML Model Config")

# Model Options
model_type = st.sidebar.radio(
    "Select Task", ['Detection', 'Segmentation'])

confidence = float(st.sidebar.slider(
    "Select Model Confidence", 25, 100, 40)) / 100

# Selecting Detection Or Segmentation
if model_type == 'Detection':
    model_path = Path(settings.DETECTION_MODEL)
elif model_type == 'Segmentation':
    model_path = Path(settings.SEGMENTATION_MODEL)

# Load Pre-trained ML Model
try:
    model = helper.load_model(model_path)
except Exception as ex:
    st.error(f"Unable to load model. Check the specified path: {model_path}")
    st.error(ex)

st.sidebar.header("Image/Video Config")
source_radio = st.sidebar.radio(
    "Select Source", settings.SOURCES_LIST)

source_img = None
# If image is selected
if source_radio == settings.IMAGE:
    source_img = st.sidebar.file_uploader(
        "Choose an image...", type=("jpg", "jpeg", "png", 'bmp', 'webp'))

    col1, col2 = st.columns(2)

    with col1:
        try:
            if source_img is None:
                default_image_path = str(settings.DEFAULT_IMAGE)
                default_image = PIL.Image.open(default_image_path)
                st.image(default_image_path, caption="Default Image",
                         use_column_width=True)
            else:
                uploaded_image = PIL.Image.open(source_img)
                st.image(source_img, caption="Uploaded Image",
                         use_column_width=True)
        except Exception as ex:
            st.error("Error occurred while opening the image.")
            st.error(ex)

    with col2:
        if source_img is None:
            default_detected_image_path = str(settings.DEFAULT_DETECT_IMAGE)
            default_detected_image = PIL.Image.open(
                default_detected_image_path)
            st.image(default_detected_image_path, caption='Detected Image',
                     use_column_width=True)
        else:
            if st.sidebar.button('Detect Objects'):
                res = model.predict(uploaded_image,
                                    conf=confidence
                                    )
                boxes = res[0].boxes
                res_plotted = res[0].plot()[:, :, ::-1]
                st.image(res_plotted, caption='Detected Image',
                         use_column_width=True)
                try:
                    with st.expander("Detection Results"):
                        for box in boxes:
                            st.write(box.data)
                except Exception as ex:
                    # st.write(ex)
                    st.write("No image is uploaded yet!")

elif source_radio == settings.VIDEO:
    helper.play_stored_video(confidence, model)

elif source_radio == settings.WEBCAM:
    helper.play_webcam(confidence, model)

elif source_radio == settings.RTSP:
    helper.play_rtsp_stream(confidence, model)

elif source_radio == settings.YOUTUBE:
    helper.play_youtube_video(confidence, model)

else:
    st.error("Please select a valid source type!")
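The still-image path in app.py reduces to a plain Ultralytics call: load a weights file, run predict with the slider confidence, then read results[0]. A minimal sketch of that flow outside Streamlit, assuming the weights/yolov8n.pt and images/office_4.jpg files from this upload are present:

import PIL
from pathlib import Path
from ultralytics import YOLO

# Same inputs the Streamlit page wires up via settings.py and the sidebar.
model = YOLO(Path("weights") / "yolov8n.pt")
image = PIL.Image.open(Path("images") / "office_4.jpg")

# 0.40 mirrors the slider default (40 / 100).
results = model.predict(image, conf=0.40)
boxes = results[0].boxes            # per-detection data (xyxy, conf, cls)
annotated_bgr = results[0].plot()   # BGR ndarray, hence the [:, :, ::-1] flip in app.py
print(f"{len(boxes)} objects detected")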
assets/Objdetectionyoutubegif-1.m4v
ADDED
Binary file (528 kB)
assets/pic1.png
ADDED
assets/pic3.png
ADDED
assets/segmentation.png
ADDED
helper.py
ADDED
@@ -0,0 +1,239 @@
from ultralytics import YOLO
import streamlit as st
import cv2
import yt_dlp
import settings


def load_model(model_path):
    """
    Loads a YOLO object detection model from the specified model_path.

    Parameters:
        model_path (str): The path to the YOLO model file.

    Returns:
        A YOLO object detection model.
    """
    model = YOLO(model_path)
    return model


def display_tracker_options():
    display_tracker = st.radio("Display Tracker", ('Yes', 'No'))
    is_display_tracker = True if display_tracker == 'Yes' else False
    if is_display_tracker:
        tracker_type = st.radio("Tracker", ("bytetrack.yaml", "botsort.yaml"))
        return is_display_tracker, tracker_type
    return is_display_tracker, None


def _display_detected_frames(conf, model, st_frame, image, is_display_tracking=None, tracker=None):
    """
    Display the detected objects on a video frame using the YOLOv8 model.

    Args:
    - conf (float): Confidence threshold for object detection.
    - model (YoloV8): A YOLOv8 object detection model.
    - st_frame (Streamlit object): A Streamlit object to display the detected video.
    - image (numpy array): A numpy array representing the video frame.
    - is_display_tracking (bool): A flag indicating whether to display object tracking (default=None).

    Returns:
    None
    """

    # Resize the image to a standard size
    image = cv2.resize(image, (720, int(720*(9/16))))

    # Display object tracking, if specified
    if is_display_tracking:
        res = model.track(image, conf=conf, persist=True, tracker=tracker)
    else:
        # Predict the objects in the image using the YOLOv8 model
        res = model.predict(image, conf=conf)

    # Plot the detected objects on the video frame
    res_plotted = res[0].plot()
    st_frame.image(res_plotted,
                   caption='Detected Video',
                   channels="BGR",
                   use_column_width=True
                   )


def get_youtube_stream_url(youtube_url):
    ydl_opts = {
        'format': 'best[ext=mp4]',
        'no_warnings': True,
        'quiet': True
    }
    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        info = ydl.extract_info(youtube_url, download=False)
        return info['url']


def play_youtube_video(conf, model):
    source_youtube = st.sidebar.text_input("YouTube Video url")
    is_display_tracker, tracker = display_tracker_options()

    if st.sidebar.button('Detect Objects'):
        if not source_youtube:
            st.sidebar.error("Please enter a YouTube URL")
            return

        try:
            st.sidebar.info("Extracting video stream URL...")
            stream_url = get_youtube_stream_url(source_youtube)

            st.sidebar.info("Opening video stream...")
            vid_cap = cv2.VideoCapture(stream_url)

            if not vid_cap.isOpened():
                st.sidebar.error(
                    "Failed to open video stream. Please try a different video.")
                return

            st.sidebar.success("Video stream opened successfully!")
            st_frame = st.empty()
            while vid_cap.isOpened():
                success, image = vid_cap.read()
                if success:
                    _display_detected_frames(
                        conf,
                        model,
                        st_frame,
                        image,
                        is_display_tracker,
                        tracker
                    )
                else:
                    break

            vid_cap.release()

        except Exception as e:
            st.sidebar.error(f"An error occurred: {str(e)}")


def play_rtsp_stream(conf, model):
    """
    Plays an rtsp stream. Detects Objects in real-time using the YOLOv8 object detection model.

    Parameters:
        conf: Confidence of YOLOv8 model.
        model: An instance of the `YOLOv8` class containing the YOLOv8 model.

    Returns:
        None

    Raises:
        None
    """
    source_rtsp = st.sidebar.text_input("rtsp stream url:")
    st.sidebar.caption(
        'Example URL: rtsp://admin:12345@192.168.1.210:554/Streaming/Channels/101')
    is_display_tracker, tracker = display_tracker_options()
    if st.sidebar.button('Detect Objects'):
        try:
            vid_cap = cv2.VideoCapture(source_rtsp)
            st_frame = st.empty()
            while (vid_cap.isOpened()):
                success, image = vid_cap.read()
                if success:
                    _display_detected_frames(conf,
                                             model,
                                             st_frame,
                                             image,
                                             is_display_tracker,
                                             tracker
                                             )
                else:
                    vid_cap.release()
                    break
        except Exception as e:
            vid_cap.release()
            st.sidebar.error("Error loading RTSP stream: " + str(e))


def play_webcam(conf, model):
    """
    Plays a webcam stream. Detects Objects in real-time using the YOLOv8 object detection model.

    Parameters:
        conf: Confidence of YOLOv8 model.
        model: An instance of the `YOLOv8` class containing the YOLOv8 model.

    Returns:
        None

    Raises:
        None
    """
    source_webcam = settings.WEBCAM_PATH
    is_display_tracker, tracker = display_tracker_options()
    if st.sidebar.button('Detect Objects'):
        try:
            vid_cap = cv2.VideoCapture(source_webcam)
            st_frame = st.empty()
            while (vid_cap.isOpened()):
                success, image = vid_cap.read()
                if success:
                    _display_detected_frames(conf,
                                             model,
                                             st_frame,
                                             image,
                                             is_display_tracker,
                                             tracker,
                                             )
                else:
                    vid_cap.release()
                    break
        except Exception as e:
            st.sidebar.error("Error loading video: " + str(e))


def play_stored_video(conf, model):
    """
    Plays a stored video file. Tracks and detects objects in real-time using the YOLOv8 object detection model.

    Parameters:
        conf: Confidence of YOLOv8 model.
        model: An instance of the `YOLOv8` class containing the YOLOv8 model.

    Returns:
        None

    Raises:
        None
    """
    source_vid = st.sidebar.selectbox(
        "Choose a video...", settings.VIDEOS_DICT.keys())

    is_display_tracker, tracker = display_tracker_options()

    with open(settings.VIDEOS_DICT.get(source_vid), 'rb') as video_file:
        video_bytes = video_file.read()
    if video_bytes:
        st.video(video_bytes)

    if st.sidebar.button('Detect Video Objects'):
        try:
            vid_cap = cv2.VideoCapture(
                str(settings.VIDEOS_DICT.get(source_vid)))
            st_frame = st.empty()
            while (vid_cap.isOpened()):
                success, image = vid_cap.read()
                if success:
                    _display_detected_frames(conf,
                                             model,
                                             st_frame,
                                             image,
                                             is_display_tracker,
                                             tracker
                                             )
                else:
                    vid_cap.release()
                    break
        except Exception as e:
            st.sidebar.error("Error loading video: " + str(e))
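Every video helper above follows the same per-frame loop: read a frame with OpenCV, hand it to model.track (persist=True keeps track IDs stable across frames) or model.predict, and render results[0].plot(). A minimal sketch of that loop without the Streamlit widgets, assuming the videos/video_1.mp4 and weights/yolov8n.pt files from this upload:

import cv2
from ultralytics import YOLO

model = YOLO("weights/yolov8n.pt")
vid_cap = cv2.VideoCapture("videos/video_1.mp4")

while vid_cap.isOpened():
    success, frame = vid_cap.read()
    if not success:
        break
    # Mirrors the "Display Tracker: Yes" branch in _display_detected_frames.
    results = model.track(frame, conf=0.4, persist=True, tracker="bytetrack.yaml")
    annotated = results[0].plot()  # BGR frame with boxes and track IDs drawn
    cv2.imshow("Detected Video", annotated)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

vid_cap.release()
cv2.destroyAllWindows()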
images/office_4.jpg
ADDED
images/office_4_detected.jpg
ADDED
packages.txt
ADDED
@@ -0,0 +1,3 @@
freeglut3-dev
libgtk2.0-dev
libgl1-mesa-glx
requirements.txt
ADDED
@@ -0,0 +1,83 @@
altair==5.3.0
attrs==23.2.0
blinker==1.8.2
Brotli==1.1.0
cachetools==5.4.0
certifi==2024.7.4
charset-normalizer==3.3.2
click==8.1.7
contourpy==1.2.1
cycler==0.12.1
filelock==3.15.4
fonttools==4.53.1
fsspec==2024.6.1
gitdb==4.0.11
GitPython==3.1.43
idna==3.7
Jinja2==3.1.4
jsonschema==4.23.0
jsonschema-specifications==2023.12.1
kiwisolver==1.4.5
lapx==0.5.9.post1
markdown-it-py==3.0.0
MarkupSafe==2.1.5
matplotlib==3.9.1
mdurl==0.1.2
mpmath==1.3.0
mutagen==1.47.0
networkx==3.3
numpy==1.26.4
nvidia-cublas-cu12==12.1.3.1
nvidia-cuda-cupti-cu12==12.1.105
nvidia-cuda-nvrtc-cu12==12.1.105
nvidia-cuda-runtime-cu12==12.1.105
nvidia-cudnn-cu12==8.9.2.26
nvidia-cufft-cu12==11.0.2.54
nvidia-curand-cu12==10.3.2.106
nvidia-cusolver-cu12==11.4.5.107
nvidia-cusparse-cu12==12.1.0.106
nvidia-nccl-cu12==2.20.5
nvidia-nvjitlink-cu12==12.5.82
nvidia-nvtx-cu12==12.1.105
opencv-python==4.10.0.84
packaging==24.1
pafy==0.5.5
pandas==2.2.2
pillow==10.4.0
protobuf==5.27.2
psutil==6.0.0
py-cpuinfo==9.0.0
pyarrow==17.0.0
pycryptodomex==3.20.0
pydeck==0.9.1
Pygments==2.18.0
pyparsing==3.1.2
python-dateutil==2.9.0.post0
pytz==2024.1
PyYAML==6.0.1
referencing==0.35.1
requests==2.32.3
rich==13.7.1
rpds-py==0.19.0
scipy==1.14.0
seaborn==0.13.2
six==1.16.0
smmap==5.0.1
streamlit==1.36.0
sympy==1.13.0
tenacity==8.5.0
toml==0.10.2
toolz==0.12.1
torch==2.3.1
torchvision==0.18.1
tornado==6.4.1
tqdm==4.66.4
triton==2.3.1
typing_extensions==4.12.2
tzdata==2024.1
ultralytics==8.2.60
ultralytics-thop==2.0.0
urllib3==2.2.2
watchdog==4.0.1
websockets==12.0
yt-dlp==2024.7.16
settings.py
ADDED
@@ -0,0 +1,46 @@
from pathlib import Path
import sys

# Get the absolute path of the current file
FILE = Path(__file__).resolve()
# Get the parent directory of the current file
ROOT = FILE.parent
# Add the root path to the sys.path list if it is not already there
if ROOT not in sys.path:
    sys.path.append(str(ROOT))
# Get the relative path of the root directory with respect to the current working directory
ROOT = ROOT.relative_to(Path.cwd())

# Sources
IMAGE = 'Image'
VIDEO = 'Video'
WEBCAM = 'Webcam'
RTSP = 'RTSP'
YOUTUBE = 'YouTube'

SOURCES_LIST = [IMAGE, VIDEO, WEBCAM, RTSP, YOUTUBE]

# Images config
IMAGES_DIR = ROOT / 'images'
DEFAULT_IMAGE = IMAGES_DIR / 'office_4.jpg'
DEFAULT_DETECT_IMAGE = IMAGES_DIR / 'office_4_detected.jpg'

# Videos config
VIDEO_DIR = ROOT / 'videos'
VIDEOS_DICT = {
    'video_1': VIDEO_DIR / 'video_1.mp4',
    'video_2': VIDEO_DIR / 'video_2.mp4',
    'video_3': VIDEO_DIR / 'video_3.mp4',
}

# ML Model config
MODEL_DIR = ROOT / 'weights'
DETECTION_MODEL = MODEL_DIR / 'yolov8n.pt'
# To use a custom model, comment out the line above and
# put your custom model's .pt file name in the line below
# DETECTION_MODEL = MODEL_DIR / 'my_detection_model.pt'

SEGMENTATION_MODEL = MODEL_DIR / 'yolov8n-seg.pt'

# Webcam
WEBCAM_PATH = 0
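Note that ROOT is re-expressed relative to Path.cwd(), so ROOT.relative_to(Path.cwd()) raises ValueError unless the working directory is the repository root or one of its parents, and the paths below only resolve when the app is launched from the repository root. A quick sanity check (an illustrative snippet, not part of the upload):

# Run from the repository root.
import settings

print(settings.DETECTION_MODEL)       # weights/yolov8n.pt
print(settings.SEGMENTATION_MODEL)    # weights/yolov8n-seg.pt
print(sorted(settings.VIDEOS_DICT))   # ['video_1', 'video_2', 'video_3']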
videos/video_1.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:514107b98ebaf53069b2471234dd6eaa3d08ff6447433d06cbdea0cc6d822e13
size 14985417
videos/video_2.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0270dec433a05236c6d89c7cb007db1780d0f3ece738fc45137496c9812892d9
size 15235393
videos/video_3.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cf59af9ffa88275c0f31250e18d0cd27eec554856074482b283f0a5614bc6b6e
size 1065720
weights/yolov8n-cls.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f5079cd980628313a2e62cd09d358e5c8debf0d8f75b6e8be7973d94e3a5da9f
size 5533216
weights/yolov8n-seg.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d39e867b2c3a5dbc1aa764411544b475cb14727bf6af1ec46c238f8bb1351ab9
size 7054355
weights/yolov8n.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:31e20dde3def09e2cf938c7be6fe23d9150bbbe503982af13345706515f2ef95
size 6534387