import gradio as gr
import os
import cv2
import numpy as np
from moviepy.editor import *

token = os.environ.get('HF_TOKEN')
# Remote Instruct-Pix2Pix Space used to process each extracted frame
pix2pix = gr.Blocks.load(name="spaces/fffiloni/instruct-pix2pix-clone", api_key=token)


def get_frames(video_in):
    frames = []
    # resize the video to a height of 512 px before extracting frames
    clip = VideoFileClip(video_in)
    clip_resized = clip.resize(height=512)
    clip_resized.write_videofile("video_resized.mp4")
    print("video resized to 512 height")

    # Opens the video file with CV2 and writes every frame to disk
    cap = cv2.VideoCapture("video_resized.mp4")
    fps = cap.get(cv2.CAP_PROP_FPS)
    i = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if ret == False:
            break
        cv2.imwrite('kang' + str(i) + '.jpg', frame)
        frames.append('kang' + str(i) + '.jpg')
        i += 1

    cap.release()
    cv2.destroyAllWindows()
    print("broke the video into frames")

    return frames, fps


def create_video(frames, fps):
    print("building video result")
    clip = ImageSequenceClip(frames, fps=fps)
    clip.write_videofile("movie.mp4", fps=fps)
    return 'movie.mp4'


def infer(prompt, video_in, seed_in, trim_value):
    print(prompt)
    break_vid = get_frames(video_in)

    frames_list = break_vid[0]
    fps = break_vid[1]
    # only process the first `trim_value` seconds of the clip
    n_frame = int(trim_value * fps)

    if n_frame >= len(frames_list):
        n_frame = len(frames_list)

    result_frames = []
    print("set stop frames to: " + str(n_frame))

    for i in frames_list[0:int(n_frame)]:
        # send each frame to the remote pix2pix Space and keep the first returned image
        pix2pix_img = pix2pix(prompt, 5.5, 1.5, i, 15, "", 512, 512, seed_in, fn_index=0)
        images = [os.path.join(pix2pix_img[0], img) for img in os.listdir(pix2pix_img[0])]
        result_frames.append(images[0])
        print("frame " + i + ": done;")

    final_vid = create_video(result_frames, fps)
    print("finished !")
    return final_vid


title = """

<div style="text-align: center; max-width: 700px; margin: 0 auto;">
  <h1>Pix2Pix Video</h1>
  <p>Apply Instruct Pix2Pix Diffusion to a video</p>
</div>

""" article = """ """ with gr.Blocks(css='style.css') as demo: with gr.Column(elem_id="col-container"): gr.HTML(title) with gr.Row(): with gr.Column(): prompt = gr.Textbox(label="Prompt", placeholder="enter prompt", show_label=False) video_inp = gr.Video(label="Video source", source="upload", type="filepath") with gr.Column(): with gr.Row(): seed_inp = gr.Slider(minimum=0, maximum=10000, step=1, value=123456) trim_in = gr.Slider(label="Cut video at (s)", minimun=2, maximum=10, step=1, value=2) video_out = gr.Video(label="Pix2pix video result") submit_btn = gr.Button("Generate Pix2Pix video") gr.HTML(article) inputs = [prompt,video_inp,seed_inp, trim_in] outputs = [video_out] submit_btn.click(infer, inputs, outputs) demo.launch().queue(max_size=12)