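"""SharkSpace demo: run frame-by-frame detection inference on example videos
with Gradio, streaming each annotated frame to the UI while an output video
is written."""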

import os
import subprocess

# On Hugging Face Spaces, install the heavy runtime dependencies on startup.
if os.getenv('SYSTEM') == 'spaces':

    subprocess.call('pip install -U openmim'.split())
    subprocess.call('pip install python-dotenv'.split())
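    # Pinned CUDA 11.3 wheels so torch/torchvision/torchaudio match the MMCV build.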
    subprocess.call('pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu113'.split())
    subprocess.call('mim install mmcv>=2.0.0'.split())
    subprocess.call('mim install mmengine'.split())
    subprocess.call('mim install mmdet'.split())
    subprocess.call('pip install opencv-python-headless==4.5.5.64'.split())
    subprocess.call('pip install git+https://github.com/cocodataset/panopticapi.git'.split())

import pathlib

import cv2
import dotenv
import gradio as gr
from huggingface_hub import snapshot_download

# Only the serial, frame-by-frame entry point is used below.
from inference import inference_frame_serial

# Load secrets (e.g. SHARK_MODEL) from a local .env file when present.
dotenv.load_dotenv()


# Download the example videos dataset from the Hugging Face Hub.
REPO_ID = 'SharkSpace/videos_examples'
snapshot_download(repo_id=REPO_ID, token=os.environ.get('SHARK_MODEL'),
                  repo_type='dataset', local_dir='videos_example')

def process_video(input_video):
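    """Run inference on each frame of input_video, streaming results to the UI.

    Yields (annotated_frame, None) after every frame so the preview updates
    live, then (last_frame, output_path) once the full video has been written.
    """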
    cap = cv2.VideoCapture(input_video)

    output_path = "output.mp4"

    fps = int(cap.get(cv2.CAP_PROP_FPS))
    width  = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Write the annotated frames as an mp4 with the input's fps and resolution.
    video = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))

    iterating, frame = cap.read()
    while iterating:
        # Run the detector on the current frame.
        display_frame = inference_frame_serial(frame)
        # Write the annotated frame (not the raw input) to the output video.
        video.write(display_frame)
        # Stream the latest annotated frame; the output video is not ready yet.
        yield display_frame, None
        iterating, frame = cap.read()

    video.release()
    # Final update: last annotated frame plus the finished output video.
    yield display_frame, output_path

with gr.Blocks() as demo:
    with gr.Row():
        input_video = gr.Video(label="input")
        processed_frames = gr.Image(label="last frame")
        output_video = gr.Video(label="output")

    with gr.Row():
        # Offer only the raw (unprocessed) clips as selectable examples.
        paths = sorted(pathlib.Path('videos_example/').rglob('*.mp4'))
        samples = [[path.as_posix()] for path in paths if 'raw_videos' in str(path)]
        examples = gr.Examples(samples, inputs=input_video)
        process_video_btn = gr.Button("process video")

    # Stream process_video's yields into the preview image and the output video.
    process_video_btn.click(process_video, input_video, [processed_frames, output_video])

# Queueing is required for generator-based (streaming) event handlers.
demo.queue()
demo.launch()