import os
import subprocess
import sys

import gradio as gr
import mediapy
import numpy as np
import tensorflow as tf
from huggingface_hub import snapshot_download
from PIL import Image
# Fetch the FILM reference implementation at startup and make its `eval`
# package importable. An argument list with subprocess.run avoids shell
# string parsing (safer and more idiomatic than os.system).
subprocess.run(
    ["git", "clone", "https://github.com/google-research/frame-interpolation"],
    check=False,  # best-effort: clone already exists on warm restarts
)
sys.path.append("frame-interpolation")
# Import must happen only after the path append: `eval` lives in the clone.
from eval import interpolator, util
def load_model(model_name):
    """Download the checkpoint for `model_name` from the Hub and wrap it in a FILM Interpolator."""
    checkpoint_dir = snapshot_download(repo_id=model_name)
    return interpolator.Interpolator(checkpoint_dir, None)
# Hub repo ids for the published FILM checkpoints (style, L1, and VGG losses).
model_names = [
    "akhaliq/frame-interpolation-film-style",
    "NimaBoscarino/frame-interpolation_film_l1",
    "NimaBoscarino/frame_interpolation_film_vgg",
]
# Eagerly download and instantiate every model at import time so predict()
# can look them up by name without re-downloading per request.
models = {model_name: load_model(model_name) for model_name in model_names}
# Point mediapy at the ffmpeg binary located by the frame-interpolation repo's util.
ffmpeg_path = util.get_ffmpeg_path()
mediapy.set_ffmpeg(ffmpeg_path)
def resize(width, img):
    """Scale a numpy image array to `width` pixels wide, preserving aspect ratio.

    Returns a PIL Image (LANCZOS resampling).
    """
    pil_img = Image.fromarray(img)
    scale = width / float(pil_img.size[0])
    new_height = int(float(pil_img.size[1]) * scale)
    return pil_img.resize((width, new_height), Image.LANCZOS)
def resize_and_crop(img_path, size, crop_origin="middle"):
    """Load the image at `img_path` and stretch it to exactly `size` (width, height).

    NOTE(review): despite the name and the `crop_origin` parameter, no cropping
    is performed — the image is simply resized, which can distort the aspect
    ratio. `crop_origin` is kept (unused) for interface compatibility.
    """
    # Context manager closes the underlying file handle (the original leaked it);
    # resize() forces the pixel load and returns a new, independent image.
    with Image.open(img_path) as img:
        return img.resize(size, Image.LANCZOS)
def resize_img(img1, img2_path):
    """Resize the image at `img2_path` to match the dimensions of the image at `img1`.

    Side effect: writes the result to 'resized_img2.png'. Returns None.
    """
    # Only the dimensions of img1 are needed; close its file handle promptly
    # (the original left it open).
    with Image.open(img1) as target:
        target_size = (target.size[0], target.size[1])
    img_to_resize = resize_and_crop(
        img2_path,
        target_size,  # set width and height to match img1
        crop_origin="middle"
    )
    img_to_resize.save('resized_img2.png')
def predict(frame1, frame2, times_to_interpolate, model_name):
    """Interpolate between two frames with the chosen FILM model; return the output video path."""
    selected_model = models[model_name]
    # Normalize both inputs to width 1080 and persist them, since the
    # interpolation utility consumes file paths rather than arrays.
    first = resize(1080, frame1)
    second = resize(1080, frame2)
    first.save("test1.png")
    second.save("test2.png")
    # Force the second frame to exactly match the first frame's dimensions.
    resize_img("test1.png", "test2.png")
    input_frames = ["test1.png", "resized_img2.png"]
    interpolated = util.interpolate_recursively_from_files(
        input_frames, times_to_interpolate, selected_model)
    mediapy.write_video("out.mp4", list(interpolated), fps=30)
    return "out.mp4"
# UI copy shown on the Gradio page.
title = "frame-interpolation"
description = "Gradio demo for FILM: Frame Interpolation for Large Scene Motion. To use it, simply upload your images and add the times to interpolate number or click on one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://film-net.github.io/' target='_blank'>FILM: Frame Interpolation for Large Motion</a> | <a href='https://github.com/google-research/frame-interpolation' target='_blank'>Github Repo</a></p>"
# Pre-filled example rows: (first frame, second frame, times_to_interpolate, model).
# NOTE(review): the example .jpeg files must exist in the working directory — verify.
examples = [
    ['cat3.jpeg', 'cat4.jpeg', 2, model_names[0]],
    ['cat1.jpeg', 'cat2.jpeg', 2, model_names[1]],
]
# Wire up the demo: two image inputs, an interpolation-depth number, and a
# model picker, producing a single MP4 video. launch() blocks and serves the app.
gr.Interface(
    fn=predict,
    inputs=[
        gr.Image(label="First Frame"),
        gr.Image(label="Second Frame"),
        gr.Number(label="Times to Interpolate", value=2),
        gr.Dropdown(label="Model", choices=model_names),
    ],
    outputs=gr.Video(label="Interpolated Frames"),
    title=title,
    description=description,
    article=article,
    examples=examples,
).launch()