# shark_detection / app.py
import gradio as gr
import os
import subprocess
from huggingface_hub import snapshot_download
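
# Pull the example videos from a Hugging Face dataset; the access token is
# read from the SHARK_MODEL secret.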
REPO_ID = 'piperod91/videos_examples'
snapshot_download(repo_id=REPO_ID, token=os.environ.get('SHARK_MODEL'),
                  repo_type='dataset', local_dir='videos_example')
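
# When running on Hugging Face Spaces, install the pinned CUDA 11.3 PyTorch
# build and the OpenMMLab stack (mmcv, mmengine, mmdet) at startup.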
if os.getenv('SYSTEM') == 'spaces':
    subprocess.call('pip install -U openmim'.split())
    subprocess.call('pip install python-dotenv'.split())
    subprocess.call('pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu113'.split())
    subprocess.call('mim install mmcv>=2.0.0'.split())
    subprocess.call('mim install mmengine'.split())
    subprocess.call('mim install mmdet'.split())
    subprocess.call('pip install opencv-python-headless==4.5.5.64'.split())
    subprocess.call('pip install git+https://github.com/cocodataset/panopticapi.git'.split())
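
# Deferred imports: cv2 and the detector's dependencies only exist after the
# install step above has run.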
import cv2
import dotenv

dotenv.load_dotenv()

import numpy as np
import glob
import pathlib

from inference import inference_frame
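
# The local inference module is not shown here; app.py only relies on this
# interface (a hypothetical sketch, not the real implementation):
#
#   def inference_frame(frame):
#       """Take one BGR frame (np.ndarray) and return it with detections drawn."""
#       ...
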
def analyze_video(x):
    """Run the detector on every frame of a video and return the processed file."""
    print(x)
    # Each call gets a fresh numbered working directory under /tmp/test/.
    path = '/tmp/test/'
    os.makedirs(path, exist_ok=True)
    videos = len(os.listdir(path))
    path = f'{path}{videos}'
    os.makedirs(path, exist_ok=True)
    outname = f'{path}_processed.mp4'
    if os.path.exists(outname):
        print('video already processed')
        return outname
    cap = cv2.VideoCapture(x)
    counter = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            name = os.path.join(path, f'{counter:05d}.png')
            frame = inference_frame(frame)
            # Write the annotated frame to disk.
            cv2.imwrite(name, frame)
            counter += 1
        else:
            break
    # Release the capture once all frames are processed.
    cap.release()
    print(path)
    # Stitch the annotated frames into an H.264 MP4 at 20 fps.
    os.system(f'''ffmpeg -framerate 20 -pattern_type glob -i '{path}/*.png' -c:v libx264 -pix_fmt yuv420p {outname} -y''')
    return outname
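

# Small callbacks that load a clicked Dataset sample into a Video component.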
def set_example_image(example: list) -> dict:
    return gr.Video.update(value=example[0])


def show_video(example: list) -> dict:
    return gr.Video.update(value=example[0])
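

# Build the Gradio UI: an upload-and-analyze tab plus an accordion of
# already-processed detection videos.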
with gr.Blocks(title='Shark Patrol', theme=gr.themes.Soft()) as demo:
    gr.Markdown("Initial DEMO.")
    with gr.Tab("Shark Detector"):
        with gr.Row():
            video_input = gr.Video(source='upload', include_audio=False)
            # video_input.style(width='50%', height='50%')
            video_output = gr.Video()
            # video_output.style(width='50%', height='50%')
        video_button = gr.Button("Analyze")

        with gr.Row():
            paths = sorted(pathlib.Path('videos_example/').rglob('*.mp4'))
            example_images = gr.Dataset(components=[video_input],
                                        samples=[[path.as_posix()]
                                                 for path in paths
                                                 if 'videos_side_by_side' not in str(path)])

        video_button.click(analyze_video, inputs=video_input, outputs=video_output)
        example_images.click(fn=set_example_image,
                             inputs=example_images,
                             outputs=video_input)
    with gr.Accordion("Current Detections"):
        with gr.Row():
            video_example = gr.Video(source='upload', include_audio=False)
        with gr.Row():
            paths = sorted(pathlib.Path('videos_example/').rglob('*webm'))
            example_preds = gr.Dataset(components=[video_example],
                                       samples=[[path.as_posix()] for path in paths])

        example_preds.click(fn=show_video,
                            inputs=example_preds,
                            outputs=video_example)
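
# Queue requests and gate the demo behind the credentials stored in the
# SHARK_USERNAME / SHARK_PASSWORD secrets.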
demo.queue()
demo.launch(width='40%',
            auth=(os.environ.get('SHARK_USERNAME'), os.environ.get('SHARK_PASSWORD')))