rengjey commited on
Commit
bdf3244
1 Parent(s): 04024a4

Upload 6 files

Browse files
Files changed (7) hide show
  1. .gitattributes +1 -0
  2. V1.mp4 +3 -0
  3. app.py +80 -0
  4. hi.pt +3 -0
  5. requirements.txt +8 -0
  6. yolov5l.pt +3 -0
  7. yolov5n.pt +3 -0
.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ V1.mp4 filter=lfs diff=lfs merge=lfs -text
V1.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cca303b2cea37a6a850ff933cf95e449210cd6101ab7446bb4eaa51124dd42f1
3
+ size 12962985
app.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import cv2
3
+ import requests
4
+ import os
5
+ import torch
6
+ import numpy as np
7
+ from ultralytics import YOLO
8
+
9
# Pretrained YOLOv5-large detector, downloaded on first run via the PyTorch Hub
# from the ultralytics/yolov5 repo (requires network access at startup).
model = torch.hub.load('ultralytics/yolov5', 'yolov5l', pretrained=True)

# Parking-lot polygon in pixel coordinates of the resized (1020x600) frame;
# detections whose centre falls inside this region are counted as parked vehicles.
area = [(48,430), (18, 515), (407,485), (750,425), (690,370)]
# Total number of parking slots available inside `area`.
total_space = 12
# NOTE(review): this module-level counter is never used — show_preds_video()
# declares its own local `count`. Safe to remove; kept for byte-compatibility.
count=0
14
+
15
def show_preds_video():
    """Stream annotated frames from the bundled parking-lot video 'V1.mp4'.

    Generator used as the Gradio callback: reads every second frame, runs the
    module-level YOLOv5 model, counts cars/trucks whose centre lies inside the
    `area` polygon, overlays the counts, and yields each frame as RGB ndarray.

    Yields:
        numpy.ndarray: annotated frame in RGB order (shape 600x1020x3).
    """
    cap = cv2.VideoCapture('V1.mp4')
    # Parking-region polygon is loop-invariant: build the int32 array once.
    zone = np.array(area, np.int32)
    count = 0
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            count += 1
            # Process only every second frame to roughly halve inference cost.
            if count % 2 != 0:
                continue

            frame = cv2.resize(frame, (1020, 600))
            frame_copy = frame.copy()
            vehicle_cnt = 0

            detections = model(frame)
            for _, row in detections.pandas().xyxy[0].iterrows():
                x1 = int(row['xmin'])
                y1 = int(row['ymin'])
                x2 = int(row['xmax'])
                y2 = int(row['ymax'])
                label = row['name']

                # Box centre, used for the point-in-polygon test.
                cx = int(x1 + x2) // 2
                cy = int(y1 + y2) // 2

                # BUG FIX: original `('car' or 'truck') in d` reduces to
                # `'car' in d`, so trucks were never counted.
                if 'car' in label or 'truck' in label:
                    # > 0 means strictly inside the polygon (0 = on the edge).
                    inside = cv2.pointPolygonTest(zone, (cx, cy), False)
                    if inside > 0:
                        cv2.rectangle(frame_copy, (x1, y1), (x2, y2), (0, 0, 255), 2)
                        cv2.putText(frame_copy, str(label), (x1, y1),
                                    cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 0), 2)
                        vehicle_cnt += 1

            free_space = total_space - vehicle_cnt
            cv2.putText(frame_copy, ("Free space: " + str(free_space)), (50, 50),
                        cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
            cv2.putText(frame_copy, ("vehicles: " + str(vehicle_cnt)), (50, 85),
                        cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
            cv2.polylines(frame_copy, [zone], True, (0, 255, 0), 2)

            # Gradio expects RGB; OpenCV decodes frames as BGR.
            yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
    finally:
        # FIX: original never released the capture handle (resource leak).
        cap.release()
58
+
59
+
60
# No input widgets: show_preds_video reads its video path internally, so the
# user only has to press Submit to start the stream.
inputs_video = []

# A single image component that displays each yielded annotated frame.
outputs_video = [gr.components.Image(type="numpy", label="Output Image")]

# Wire the generator callback into a Gradio interface.
interface_video = gr.Interface(
    fn=show_preds_video,
    inputs=inputs_video,
    outputs=outputs_video,
    title="Parking counter",
    description="Click submit !!!'",
    cache_examples=False,
)

# Single-tab app; queue() enables streaming of successive generator yields.
demo = gr.TabbedInterface([interface_video], tab_names=['Video inference'])
demo.queue().launch()
hi.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6421d3f2ffb28969f96f3db64a407fe83ae49f2345dfdc85540fecc5b17aa769
3
+ size 3780605
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ torch
2
+ gradio==3.4.0
3
+ opencv-python
4
+ numpy<1.24
5
+ ultralytics
6
+ yolov5
7
+
8
+
yolov5l.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f603b7354c25454d1270663a14d8ddc1eea98e5eebc1d84ce0c6e3150fa155f
3
+ size 93622629
yolov5n.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f180cf23ba0717ada0badd6c685026d73d48f184d00fc159c2641284b2ac0a3
3
+ size 4062133