# Gradio demo with two tabs: extract audio from a video, and merge audio with a
# still image into a video. Requires ffmpeg to be available on the PATH.
import os
import subprocess
from datetime import datetime

import gradio as gr


def video_to_audio_tab():
    def video_to_audio(video):
        # Timestamped file name; colons are replaced so the name is valid on Windows.
        output_file_name = "{datetime}.mp3".format(datetime=datetime.now().isoformat().replace(':', '-'))
        output = os.path.join("out_audio", output_file_name)
        # ffmpeg fails if the target directory does not exist, so create it first.
        os.makedirs("out_audio", exist_ok=True)
        # -map a keeps only the audio streams of the input video.
        command = "ffmpeg -loglevel quiet -i \"{video}\" -map a \"{output}\"".format(video=video, output=output)
        subprocess.call(command, shell=True)
        return output

    with gr.Tab("video2audio"):
        with gr.Row():
            video_input = gr.Video(label="Input video")
            with gr.Column():
                audio_output = gr.Audio(label="Output audio", type="filepath")
                video_to_audio_button = gr.Button(value="Extract audio", variant="primary")
        video_to_audio_button.click(video_to_audio, inputs=video_input, outputs=audio_output)


def audio_to_video_tab():
    def audio_to_video(audio, image):
        output_file_name = "{datetime}.mp4".format(datetime=datetime.now().isoformat().replace(':', '-'))
        output = os.path.join("out_video", output_file_name)
        os.makedirs("out_video", exist_ok=True)
        # Simpler variant, kept for reference:
        # command = "ffmpeg -i \"{image}\" -i \"{audio}\" \"{output}\"".format(image=image, audio=audio, output=output)
        # Loop the still image for the duration of the audio (-shortest) and encode to H.264/AAC.
        command = "ffmpeg -loglevel quiet -loop 1 -i \"{image}\" -i \"{audio}\" -r 2 -c:v libx264 -tune stillimage -c:a aac -b:a 192k -pix_fmt yuv420p -shortest \"{output}\"".format(image=image, audio=audio, output=output)
        subprocess.call(command, shell=True)
        return output

    with gr.Tab("audio2video"):
        with gr.Row():
            with gr.Column():
                image_input = gr.Image(label="Input image", type="filepath")
                audio_input = gr.Audio(label="Input audio", type="filepath")
            with gr.Column():
                video_output = gr.Video(label="Output video")
                create_video_button = gr.Button(value="Merge audio with provided image", variant="primary")
        create_video_button.click(audio_to_video, inputs=[audio_input, image_input], outputs=video_output)


with gr.Blocks() as demo:
    video_to_audio_tab()
    audio_to_video_tab()

# Print the ffmpeg version at startup so a missing ffmpeg install is visible in the console.
ffmpeg_info_command = "ffmpeg -version"
subprocess.call(ffmpeg_info_command, shell=True)

demo.launch()