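"""Parking-space counter.

Runs YOLOv5 on a video, counts cars and trucks whose box centres fall
inside a parking-lot polygon, and streams the annotated frames through a
Gradio interface.
"""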
import gradio as gr
import cv2
import torch
import numpy as np

# Load a pretrained YOLOv5 detector ('yolov5s' assumed here; any YOLOv5
# hub variant works).
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
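
# Parking-area polygon in 1020x600 resized-frame coordinates, and the
# number of spaces it holds.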
area = [(48, 430), (18, 515), (407, 485), (750, 425), (690, 370)]
total_space = 12

def show_preds_video():
    cap = cv2.VideoCapture('V111.mp4')
    count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        count += 1
        # Run detection on every second frame to reduce inference load.
        if count % 2 != 0:
            continue
        frame = cv2.resize(frame, (1020, 600))
        frame_copy = frame.copy()
        vehicle_cnt = 0
        results = model(frame)
        for _, row in results.pandas().xyxy[0].iterrows():
            x1 = int(row['xmin'])
            y1 = int(row['ymin'])
            x2 = int(row['xmax'])
            y2 = int(row['ymax'])
            name = row['name']
            # Centre of the detection box.
            cx = (x1 + x2) // 2
            cy = (y1 + y2) // 2
            if name in ('car', 'truck'):
                # Positive when the box centre lies inside the parking polygon.
                inside = cv2.pointPolygonTest(np.array(area, np.int32), (cx, cy), False)
                if inside > 0:
                    cv2.rectangle(frame_copy, (x1, y1), (x2, y2), (0, 0, 255), 2)
                    cv2.putText(frame_copy, name, (x1, y1), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 0), 2)
                    vehicle_cnt += 1
        free_space = total_space - vehicle_cnt
        cv2.putText(frame_copy, "Free space: " + str(free_space), (50, 50), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
        cv2.putText(frame_copy, "Vehicles: " + str(vehicle_cnt), (50, 85), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
        cv2.polylines(frame_copy, [np.array(area, np.int32)], True, (0, 255, 0), 2)
        # Gradio expects RGB; OpenCV frames are BGR.
        yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
    cap.release()
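
# Gradio wiring: no inputs are needed; the generator streams annotated
# frames into the Image output.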
inputs_video = [
    # gr.components.Video(type="filepath", label="Input Video"),
]
outputs_video = [
    gr.components.Image(type="numpy", label="Output Image"),
]
interface_video = gr.Interface(
    fn=show_preds_video,
    inputs=inputs_video,
    outputs=outputs_video,
    title="Parking counter",
    description="Click submit!",
    # examples=video_path,
    cache_examples=False,
)
gr.TabbedInterface(
    [interface_video],
    tab_names=['Video inference']
).queue().launch()