AI-Naga committed on
Commit
e7861bb
1 Parent(s): 963547d

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -79
app.py DELETED
@@ -1,79 +0,0 @@
1
-
2
- import gradio as gr
3
- import cv2
4
- import requests
5
- import os
6
- import torch
7
- import numpy as np
8
- from ultralytics import YOLO
9
-
10
# --- Model and scene configuration (module-level) ---

# YOLOv5-XL pretrained weights fetched via torch.hub.
# NOTE(review): downloads the model on first run — requires network access.
model = torch.hub.load('ultralytics/yolov5', 'yolov5x', pretrained=True)

# Example inputs surfaced in the Gradio UI.
path = [['image_0.jpg'], ['image_1.jpg']]
video_path = [['TresPass_Detection_1.mp4']]

# Restricted zone as a closed polygon of (x, y) vertices in the 1020x600
# resized frame; the first vertex is repeated at the end to close the contour.
area = [(215, 180), (110, 80), (360, 55), (510, 140), (215, 180)]
16
def show_preds_video(source='TresPass_Detection_1.mp4'):
    """Stream annotated frames from *source*, flagging people inside the restricted zone.

    Runs YOLOv5 on every 8th frame of the video, tests each detected person's
    box centre against the module-level ``area`` polygon, and draws an alert
    overlay when someone is inside it.

    Parameters
    ----------
    source : str
        Path to the input video. Defaults to the bundled demo clip, so
        existing no-argument callers keep working.

    Yields
    ------
    numpy.ndarray
        The annotated frame converted to RGB (Gradio expects RGB; OpenCV
        decodes as BGR).
    """
    cap = cv2.VideoCapture(source)
    try:
        zone = np.array(area, np.int32)
        count = 0
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            count += 1
            # Only run inference on every 8th frame to keep streaming responsive.
            if count % 8 != 0:
                continue

            frame = cv2.resize(frame, (1020, 600))
            annotated = frame.copy()

            # Outline the restricted zone in green.
            cv2.polylines(annotated, [zone], True, (0, 255, 0), 2)

            # NOTE: renamed from the original's `results`, which was shadowed
            # by the pointPolygonTest return value inside the loop below.
            detections = model(frame)
            for _, row in detections.pandas().xyxy[0].iterrows():
                x1 = int(row['xmin'])
                y1 = int(row['ymin'])
                x2 = int(row['xmax'])
                y2 = int(row['ymax'])
                label = row['name']

                # Box centre is the point tested against the zone polygon.
                cx = (x1 + x2) // 2
                cy = (y1 + y2) // 2

                if 'person' in label:
                    # > 0 means strictly inside the polygon.
                    inside = cv2.pointPolygonTest(zone, (cx, cy), False)
                    if inside > 0:
                        cv2.rectangle(annotated, (x1, y1), (x2, y2), (0, 0, 255), 2)
                        cv2.putText(annotated, str(label), (x1, y1),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1)
                        cv2.putText(annotated, "Alert !!! Trespasser detected !!!",
                                    (50, 300), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 3)

            yield cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)
    finally:
        # Original leaked the capture handle; always release it.
        cap.release()
57
-
58
# --- Gradio UI wiring (module-level: the app launches on import/run) ---

# The generator takes no UI inputs; the video source is fixed in the function.
inputs_video = []

outputs_video = [
    gr.components.Image(type="numpy", label="Output Image"),
]

interface_video = gr.Interface(
    fn=show_preds_video,
    inputs=inputs_video,
    outputs=outputs_video,
    title="Security - Trespasser monitoring ",
    examples=video_path,
    cache_examples=False,
)

# Single-tab app; queue() is required so the generator's yielded frames stream.
gr.TabbedInterface(
    [interface_video],
    tab_names=['Video inference'],
).queue().launch()