import os
import sys

import numpy as np
import tensorflow as tf
import mediapy
from PIL import Image
import gradio as gr
from huggingface_hub import snapshot_download

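# Fetch the FILM reference implementation and put it on sys.path so its
# eval modules (interpolator, util) can be imported below.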
os.system("git clone https://github.com/google-research/frame-interpolation") |
|
sys.path.append("frame-interpolation") |
|
|
|
|
|
from eval import interpolator, util |
|
|
|
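# Download a FILM checkpoint from the Hugging Face Hub and wrap it in the
# repo's Interpolator helper.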
def load_model(model_name):
    model = interpolator.Interpolator(snapshot_download(repo_id=model_name), None)
    return model


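# FILM checkpoints hosted on the Hugging Face Hub (style, L1, and VGG variants).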
model_names = [
    "akhaliq/frame-interpolation-film-style",
    "NimaBoscarino/frame-interpolation_film_l1",
    "NimaBoscarino/frame_interpolation_film_vgg",
]

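# Load every variant up front so the model dropdown can switch without reloading.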
models = {model_name: load_model(model_name) for model_name in model_names}

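# Tell mediapy which ffmpeg binary to use when writing the output video.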
ffmpeg_path = util.get_ffmpeg_path()
mediapy.set_ffmpeg(ffmpeg_path)


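# Resize a NumPy image from Gradio to the given width, keeping its aspect ratio.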
def resize(width, img):
    img = Image.fromarray(img)
    wpercent = width / float(img.size[0])
    hsize = int(float(img.size[1]) * wpercent)
    img = img.resize((width, hsize), Image.LANCZOS)
    return img


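# Resize an image on disk to an exact (width, height). Despite the name, no
# cropping is applied and crop_origin is currently unused.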
def resize_and_crop(img_path, size, crop_origin="middle"):
    img = Image.open(img_path)
    img = img.resize(size, Image.LANCZOS)
    return img


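# Resize the second image to the first image's dimensions so every frame fed to
# the interpolator has the same resolution.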
def resize_img(img1_path, img2_path):
    img_target_size = Image.open(img1_path)
    img_to_resize = resize_and_crop(
        img2_path,
        (img_target_size.size[0], img_target_size.size[1]),
        crop_origin="middle",
    )
    # Overwrite the original file so the resized frame is the one interpolated.
    img_to_resize.save(img2_path)


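# Gradio callback: normalize the six uploaded frames to a common size, run FILM
# recursively between consecutive frames, and return the result as an MP4.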
def predict(frame1, frame2, frame3, frame4, frame5, frame6, times_to_interpolate, model_name):
    model = models[model_name]

    # Scale every uploaded frame to a width of 1080 pixels.
    frames = [resize(1080, frame) for frame in [frame1, frame2, frame3, frame4, frame5, frame6]]

    # Save the frames to disk; frames 2-6 are then resized to match frame 1 exactly.
    for i, frame in enumerate(frames):
        frame.save(f"test{i+1}.png")
        if i > 0:
            resize_img("test1.png", f"test{i+1}.png")

    input_frames = [f"test{i+1}.png" for i in range(6)]

    # gr.Number returns a float, so cast the recursion depth to int. Each level
    # roughly doubles the number of generated in-between frames.
    interpolated_frames = list(
        util.interpolate_recursively_from_files(input_frames, int(times_to_interpolate), model)
    )

    mediapy.write_video("out.mp4", interpolated_frames, fps=30)
    return "out.mp4"


title = "frame-interpolation" |
|
description = "Gradio demo for FILM: Frame Interpolation for Large Scene Motion. To use it, simply upload your images and add the times to interpolate number or click on one of the examples to load them. Read more at the links below." |
|
article = "<p style='text-align: center'><a href='https://film-net.github.io/' target='_blank'>FILM: Frame Interpolation for Large Motion</a> | <a href='https://github.com/google-research/frame-interpolation' target='_blank'>Github Repo</a></p>" |
|
examples = [ |
|
['cat3.jpeg', 'cat4.jpeg', 'cat5.jpeg', 'cat6.jpeg', 'cat7.jpeg', 'cat8.jpeg', 2, model_names[0]], |
|
['cat1.jpeg', 'cat2.jpeg', 'cat3.jpeg', 'cat4.jpeg', 'cat5.jpeg', 'cat6.jpeg', 2, model_names[1]], |
|
] |
|
|
|
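# Build and launch the UI: six image inputs, an interpolation-depth control,
# and a model selector.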
gr.Interface(
    fn=predict,
    inputs=[
        gr.Image(label="First Frame"),
        gr.Image(label="Second Frame"),
        gr.Image(label="Third Frame"),
        gr.Image(label="Fourth Frame"),
        gr.Image(label="Fifth Frame"),
        gr.Image(label="Sixth Frame"),
        gr.Number(label="Times to Interpolate", value=2),
        gr.Dropdown(label="Model", choices=model_names, value=model_names[0]),
    ],
    outputs=gr.Video(label="Interpolated Frames"),
    title=title,
    description=description,
    article=article,
    examples=examples,
).launch()