# Duplicated from the akhaliq/frame-interpolation Space.
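# The FILM repository is cloned at runtime so that its `eval.interpolator` and
# `eval.util` modules (imported below) resolve from the added sys.path entry.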
import os
os.system("git clone https://github.com/google-research/frame-interpolation")
import sys
sys.path.append("frame-interpolation")
import numpy as np
import tensorflow as tf
import mediapy
from PIL import Image
from eval import interpolator, util
import gradio as gr
from huggingface_hub import snapshot_download
from image_tools.sizes import resize_and_crop

def load_model(model_name):
    # Fetch the checkpoint from the Hugging Face Hub and wrap it in a FILM interpolator.
    model = interpolator.Interpolator(snapshot_download(repo_id=model_name), None)
    return model

model_names = [
    "akhaliq/frame-interpolation-film-style",
    "NimaBoscarino/frame-interpolation_film_l1",
    "NimaBoscarino/frame_interpolation_film_vgg",
]
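
# Download every checkpoint from the Hugging Face Hub up front and keep the
# interpolators in memory so the demo can switch models per request.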
models = {model_name: load_model(model_name) for model_name in model_names}
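
# mediapy needs an ffmpeg binary to encode the output video; use the one
# located by FILM's eval utilities.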
ffmpeg_path = util.get_ffmpeg_path()
mediapy.set_ffmpeg(ffmpeg_path)

def resize(width, img):
    # Scale the image down to the requested width while preserving its aspect ratio.
    basewidth = width
    img = Image.open(img)
    wpercent = basewidth / float(img.size[0])
    hsize = int(float(img.size[1]) * wpercent)
    img = img.resize((basewidth, hsize), Image.LANCZOS)  # LANCZOS replaces the removed ANTIALIAS alias
    return img

def resize_img(img1, img2):
    # Crop/resize the second image so its dimensions match the first, then save it to disk.
    img_target_size = Image.open(img1)
    img_to_resize = resize_and_crop(
        img2,
        (img_target_size.size[0], img_target_size.size[1]),  # width and height taken from img1
        crop_origin="middle"
    )
    img_to_resize.save('resized_img2.png')

def predict(frame1, frame2, times_to_interpolate, model_name):
    model = models[model_name]

    # Downscale both inputs to a 256px width, then force the second frame to the
    # exact dimensions of the first so the interpolator sees a matched pair.
    frame1 = resize(256, frame1)
    frame2 = resize(256, frame2)
    frame1.save("test1.png")
    frame2.save("test2.png")
    resize_img("test1.png", "test2.png")
    input_frames = ["test1.png", "resized_img2.png"]

    # Recursively interpolate intermediate frames (2**times_to_interpolate - 1 per pair)
    # and write the result as a 30 fps video.
    frames = list(
        util.interpolate_recursively_from_files(
            input_frames, times_to_interpolate, model))
    mediapy.write_video("out.mp4", frames, fps=30)
    return "out.mp4"
title = "frame-interpolation"
description = "Gradio demo for FILM: Frame Interpolation for Large Scene Motion. To use it, simply upload your images and add the times to interpolate number or click on one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://film-net.github.io/' target='_blank'>FILM: Frame Interpolation for Large Motion</a> | <a href='https://github.com/google-research/frame-interpolation' target='_blank'>Github Repo</a></p>"
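
# Preloaded examples: (frame 1, frame 2, times to interpolate, model name).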
examples = [
    ['cat3.jpeg', 'cat4.jpeg', 2, model_names[0]],
    ['cat1.jpeg', 'cat2.jpeg', 2, model_names[1]],
]
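
# Two image inputs, an interpolation-depth slider, and a model dropdown, with a playable video output.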
gr.Interface(
    predict,
    [
        gr.inputs.Image(type='filepath'),
        gr.inputs.Image(type='filepath'),
        gr.inputs.Slider(minimum=2, maximum=4, step=1),
        gr.inputs.Dropdown(choices=model_names, default=model_names[0]),
    ],
    "playable_video",
    title=title,
    description=description,
    article=article,
    examples=examples,
).launch(enable_queue=True)