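# app.py for the "Shark Patrol" Hugging Face Space: a Gradio demo that runs a
# shark-detection model frame by frame over uploaded videos and also shows a
# gallery of pre-computed example detections.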
import gradio as gr
import os
import subprocess

from huggingface_hub import snapshot_download

# Pull the example videos from a (private) dataset repo on the Hub.
REPO_ID = 'piperod91/videos_examples'
snapshot_download(repo_id=REPO_ID, token=os.environ.get('SHARK_MODEL'),
                  repo_type='dataset', local_dir='videos_example')
# When running on Spaces, install the heavy runtime dependencies at startup.
if os.getenv('SYSTEM') == 'spaces':
    subprocess.call('pip install -U openmim'.split())
    subprocess.call('pip install python-dotenv'.split())
    subprocess.call('pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu113'.split())
    subprocess.call('mim install mmcv>=2.0.0'.split())
    subprocess.call('mim install mmengine'.split())
    subprocess.call('mim install mmdet'.split())
    subprocess.call('pip install opencv-python-headless==4.5.5.64'.split())
    subprocess.call('pip install git+https://github.com/cocodataset/panopticapi.git'.split())
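# The imports below depend on the packages installed above (OpenCV and the
# MMDetection stack used by `inference`), so they are deferred until after
# installation.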
import cv2
import dotenv

# Load local environment variables (tokens, credentials) before importing the model code.
dotenv.load_dotenv()

import numpy as np
import glob
import pathlib

from inference import inference_frame
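# analize_video: run the detector on every frame of the uploaded video, dump the
# annotated frames as PNGs under /tmp/test/<n>/, then stitch them back into an
# MP4 with ffmpeg and return its path for Gradio to display.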
def analize_video(x):
    print(x)
    path = '/tmp/test/'
    os.makedirs(path, exist_ok=True)
    videos = len(os.listdir(path))
    path = f'{path}{videos}'
    os.makedirs(path, exist_ok=True)
    outname = f'{path}_processed.mp4'
    if os.path.exists(outname):
        print('video already processed')
        return outname
    cap = cv2.VideoCapture(x)
    counter = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            name = os.path.join(path, f'{counter:05d}.png')
            # Run the detector and write the annotated frame to disk.
            frame = inference_frame(frame)
            cv2.imwrite(name, frame)
            counter += 1
        else:
            break
    # Release the capture and assemble the annotated frames into an MP4.
    cap.release()
    print(path)
    os.system(f'''ffmpeg -framerate 20 -pattern_type glob -i '{path}/*.png' -c:v libx264 -pix_fmt yuv420p {outname} -y''')
    return outname
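# Dataset click handlers: both load the clicked example video into the target
# Video component.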
def set_example_image(example: list) -> dict:
    return gr.Video.update(value=example[0])


def show_video(example: list) -> dict:
    return gr.Video.update(value=example[0])
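# UI: a "Shark Detector" tab for uploading and analysing a video (with clickable
# examples), plus a "Current Detections" accordion showing pre-computed
# prediction videos from the example dataset.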
with gr.Blocks(title='Shark Patrol', theme=gr.themes.Soft(), live=True) as demo:
    gr.Markdown("Initial DEMO.")

    with gr.Tab("Shark Detector"):
        with gr.Row():
            video_input = gr.Video(source='upload', include_audio=False)
            # video_input.style(width='50%', height='50%')
            video_output = gr.Video()
            # video_output.style(width='50%', height='50%')
        video_button = gr.Button("Analyze")

        with gr.Row():
            paths = sorted(pathlib.Path('videos_example/').rglob('*.mp4'))
            example_images = gr.Dataset(
                components=[video_input],
                samples=[[path.as_posix()]
                         for path in paths if 'videos_side_by_side' not in str(path)])

        video_button.click(analize_video, inputs=video_input, outputs=video_output)
        example_images.click(fn=set_example_image,
                             inputs=example_images,
                             outputs=video_input)

    with gr.Accordion("Current Detections"):
        with gr.Row():
            video_example = gr.Video(source='upload', include_audio=False, stream=True)
        with gr.Row():
            paths = sorted(pathlib.Path('videos_example/').rglob('*rgb.mp4'))
            example_preds = gr.Dataset(components=[video_example],
                                       samples=[[path.as_posix()] for path in paths])
        example_preds.click(fn=show_video,
                            inputs=example_preds,
                            outputs=video_example)

demo.queue()
# if os.getenv('SYSTEM') == 'spaces':
demo.launch(width='40%',
            auth=(os.environ.get('SHARK_USERNAME'), os.environ.get('SHARK_PASSWORD')))