import os
import subprocess

import gradio as gr

# On Hugging Face Spaces, pin mmcv-full and swap opencv-python for the
# headless build before importing cv2.
if os.getenv('SYSTEM') == 'spaces':
    import mim
    mim.uninstall('mmcv-full', confirm_yes=True)
    mim.install('mmcv-full==1.5.2', is_yes=True)
    subprocess.call('pip uninstall -y opencv-python'.split())
    subprocess.call('pip uninstall -y opencv-python-headless'.split())
    subprocess.call('pip install opencv-python-headless==4.5.5.64'.split())

import cv2

from inference import inference_frame


def analyze_video(x):
    """Run the detector on every frame of the uploaded video and re-encode the result."""
    cap = cv2.VideoCapture(x)

    # Each upload gets its own numbered working directory under /tmp/test/.
    path = '/tmp/test/'
    os.makedirs(path, exist_ok=True)
    videos = len(os.listdir(path))
    path = f'{path}{videos}'
    os.makedirs(path, exist_ok=True)
    outname = f'{path}_processed.mp4'
    # out = cv2.VideoWriter(outname, cv2.VideoWriter_fourcc(*'h264'), 20.0, (640, 480))

    counter = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # Run detection on the frame and save it as a numbered PNG.
        frame = inference_frame(frame)
        name = os.path.join(path, f'{counter:05d}.png')
        cv2.imwrite(name, frame)
        counter += 1

    # Release the capture, then stitch the frames back into an mp4.
    cap.release()
    print(path)
    os.system(
        f"ffmpeg -framerate 20 -pattern_type glob -i '{path}/*.png' "
        f"-c:v libx264 -pix_fmt yuv420p {outname}"
    )
    return outname


with gr.Blocks(title='Shark Patrol', theme=gr.themes.Soft()) as demo:
    gr.Markdown("Initial DEMO.")
    with gr.Tab("Shark Detector"):
        with gr.Row():
            video_input = gr.Video(source='upload', include_audio=False)
            # video_input.style(width='50%', height='50%')
            video_output = gr.Video()
            # video_output.style(width='50%', height='50%')
        video_button = gr.Button("Analyze")
        with gr.Accordion("Open for More!"):
            gr.Markdown("Placeholder for detection")
        video_button.click(analyze_video, inputs=video_input, outputs=video_output)

demo.queue()
demo.launch(
    share=True,
    width='40%',
    auth=(os.environ.get('SHARK_USERNAME'), os.environ.get('SHARK_PASSWORD')),
)