|
|
|
import os

import cv2
import gradio as gr
import numpy as np

from scenedetect import open_video, SceneManager
from scenedetect.detectors import ContentDetector

from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
|
|
|
|
|
|
|
title = "Scene Edit Detection"

description = "<p style='text-align: center'>Gradio demo of PySceneDetect. <br />Automatically finds every shot in a video sequence</p><p style='text-align: center'> 1. Gives you the timecode in/out for each shot. 2. Saves each shot as a split mp4 video chunk for you to download. 3. Displays a thumbnail for each shot as a gallery output.<br /> <img id='visitor-badge' alt='visitor badge' src='https://visitor-badge.glitch.me/badge?page_id=gradio-blocks.scene-edit-detection' style='display: inline-block'/></p>"

article = "<p style='text-align: center'><a href='http://scenedetect.com/en/latest/' target='_blank'>PySceneDetect website</a> | <a href='https://github.com/Breakthrough/PySceneDetect' target='_blank'>Github Repo</a></p>"
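
# Pipeline: PySceneDetect's ContentDetector finds the cuts, moviepy's ffmpeg_extract_subclip
# writes one mp4 clip per shot, and OpenCV grabs a thumbnail frame for each shot; the three
# lists returned by find_scenes feed the JSON, File and Gallery outputs defined below.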
|
|
|
|
|
|
|
|
|
# Gradio input components: the video to analyze and the content-detection threshold
video_input = gr.Video(source="upload", format="mp4", label="Video Sequence", mirror_webcam=False)
threshold = gr.Slider(label="Threshold (average pixel change): if exceeded, triggers a scene cut. Default: 27.0", minimum=15.0, maximum=40.0, value=27.0)
|
|
|
|
|
|
|
def convert_to_tuple(data):
    # Helper: return the accumulated outputs as an immutable tuple
    return tuple(data)
|
|
|
|
|
def find_scenes(video_path, threshold):

    filename = os.path.splitext(os.path.basename(video_path))[0]

    # Run PySceneDetect's content-aware detector over the uploaded video
    video = open_video(video_path)
    scene_manager = SceneManager()
    scene_manager.add_detector(ContentDetector(threshold=threshold))

    scene_manager.detect_scenes(video, show_progress=True)

    # Each scene is a (start, end) pair of FrameTimecode objects
    scene_list = scene_manager.get_scene_list()
|
|
|
|
|
    data_outputs.append(scene_list)
    gradio_components_outputs.append("json")

    timecodes = []
    shots = []
    stills = []

    # Bail out early if no cut was detected, instead of indexing an empty list below
    if not scene_list:
        return timecodes, shots, stills

    # First entry records the source file name and its frame rate
    timecodes.append({"title": filename + ".mp4", "fps": scene_list[0][0].get_framerate()})
|
|
|
|
|
|
|
|
|
    for i, shot in enumerate(scene_list):

        # Shot boundaries in seconds (for ffmpeg), as timecodes, and as frame numbers
        framerate = shot[0].get_framerate()
        shot_in = shot[0].get_frames() / framerate
        shot_out = shot[1].get_frames() / framerate

        tc_in = shot[0].get_timecode()
        tc_out = shot[1].get_timecode()

        frame_in = shot[0].get_frames()
        frame_out = shot[1].get_frames()

        timecode = {"tc_in": tc_in, "tc_out": tc_out, "frame_in": frame_in, "frame_out": frame_out}
        timecodes.append(timecode)
|
|
|
|
|
        # Cut this shot out of the source video as its own mp4 clip
        target_name = "shot_" + str(i + 1) + "_" + str(filename) + ".mp4"
        ffmpeg_extract_subclip(video_path, shot_in, shot_out, targetname=target_name)
        shots.append(target_name)

        data_outputs.append(target_name)
        gradio_components_outputs.append("video")
|
|
|
|
|
|
|
|
|
|
|
        # Grab the first frame of the shot with OpenCV and save it as a thumbnail
        vid = cv2.VideoCapture(video_path)
        fps = vid.get(cv2.CAP_PROP_FPS)
        print('frames per second =', fps)

        frame_id = shot[0].get_frames()
        vid.set(cv2.CAP_PROP_POS_FRAMES, frame_id)
        ret, frame = vid.read()
        vid.release()

        img = str(frame_id) + '_screenshot.png'
        if ret:
            cv2.imwrite(img, frame)
            stills.append((img, 'shot ' + str(i + 1)))
|
|
|
|
|
    data_outputs.append(shots)
    gradio_components_outputs.append("file")

    data_outputs.append(stills)
    gradio_components_outputs.append("gallery")
|
|
|
|
|
|
|
|
|
    # Debug print of everything collected; only timecodes, shots and stills are returned to Gradio
    results = convert_to_tuple(data_outputs)
    print(results)

    return timecodes, shots, stills
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Module-level accumulators appended to by find_scenes; defined before the interface is
# launched, and kept only for the debug print (they grow across runs)
data_outputs = []
gradio_components_outputs = []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Output components, matching the three values returned by find_scenes:
# timecodes -> JSON, shots -> File, stills -> Gallery
outputs = [gr.JSON(label="Shots detected"), gr.File(label="Downloadable Shots"), gr.Gallery(label="Still Images from each shot").style(grid=3)]
|
|
|
|
|
|
gr.Interface(fn=find_scenes, inputs=[video_input, threshold], outputs=outputs, title=title, description=description, article=article).launch() |
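
# For a quick check without the Gradio UI (hypothetical local file "example.mp4"),
# find_scenes can also be called directly:
#   timecodes, shots, stills = find_scenes("example.mp4", threshold=27.0)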