import gradio as gr
import cv2
import torch
import numpy as np
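# Load a pretrained YOLOv5x model via torch.hub (trained on COCO, whose classes include 'person').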
model = torch.hub.load('ultralytics/yolov5', 'yolov5x', pretrained=True)
path = [['image_0.jpg'], ['image_1.jpg']]  # example images for the (currently disabled) image-inference tab
video_path = [['TresPass_Detection_1.mp4']]
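# Vertices of the restricted zone, in pixel coordinates of the 1020x600 resized frame.
# The first vertex is repeated at the end so the polygon is closed when drawn.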
# area = [(215, 180), (110, 75), (370, 55), (520, 140), (215, 180) ]
# area = [(190, 180), (100, 75), (360, 55), (510, 140), (190, 180) ]
area = [(215, 180), (110, 80), (360, 55), (510, 140), (215, 180) ]
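# Generator used by Gradio: it reads the demo clip frame by frame, runs detection on
# every 10th frame and yields the annotated result so the output image updates live.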
# def show_preds_video(video_path):
def show_preds_video():
    cap = cv2.VideoCapture('TresPass_Detection_1.mp4')
    count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Only process every 10th frame to keep inference fast enough for streaming.
        count += 1
        if count % 10 != 0:
            continue

        # frame = cv2.imread(video_path)
        frame = cv2.resize(frame, (1020, 600))
        frame_copy = frame.copy()

        # Draw the restricted-zone polygon on the output frame.
        cv2.polylines(frame_copy, [np.array(area, np.int32)], True, (0, 255, 0), 2)

        results = model(frame)
        for index, row in results.pandas().xyxy[0].iterrows():
            x1 = int(row['xmin'])
            y1 = int(row['ymin'])
            x2 = int(row['xmax'])
            y2 = int(row['ymax'])
            label = row['name']

            # Centre of the bounding box.
            cx = (x1 + x2) // 2
            cy = (y1 + y2) // 2

            if 'person' in label:
                # Positive value means the box centre lies inside the restricted zone.
                inside = cv2.pointPolygonTest(np.array(area, np.int32), (cx, cy), False)
                # inside = cv2.pointPolygonTest(np.array(area, np.int32), (x2, y1), False)
                # inside = cv2.pointPolygonTest(np.array(area, np.int32), (x2, y2), False)
                if inside > 0:
                    cv2.rectangle(frame_copy, (x1, y1), (x2, y2), (0, 0, 255), 2)
                    cv2.putText(frame_copy, str(label), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1)
                    cv2.putText(frame_copy, "Alert !!! Trespasser detected !!!", (50, 300), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 3)

        yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
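# Gradio wiring: the video tab takes no user inputs (the demo clip is hard-coded in
# show_preds_video) and streams each annotated frame back as an output image.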
inputs_video = [
    # gr.components.Video(type="filepath", label="Input Video", visible=False),
]
outputs_video = [
    gr.components.Image(type="numpy", label="Output Image"),
]
interface_video = gr.Interface(
    fn=show_preds_video,
    inputs=inputs_video,
    outputs=outputs_video,
    title="Security - Trespasser monitoring",
    examples=video_path,
    cache_examples=False,
)
gr.TabbedInterface(
    [interface_video],
    # [interface_image, interface_video],
    tab_names=['Video inference']
).queue().launch()