import argparse
import json
import os
import sys
import tempfile
import time
from pathlib import Path

import anvil.media
import anvil.server
import ffmpeg
import whisper
from whisper.tokenizer import LANGUAGES
from whisper.utils import write_srt
from youtube_dl import YoutubeDL
from youtube_dl.utils import DownloadError

from utils.subs import bake_subs
from utils.utils import get_args

original_dir = os.getcwd()
output_dir = Path('output')

args = get_args()
model_size: str = args.get("model")
preload_model: bool = args.get("preload")

if preload_model:
    print("Preloading model")
    model = whisper.load_model(model_size)


def download_generator(url):
    ### Step 1: check that the video is available
    yield {"message": f"Checking {url} for videos"}
    try:
        meta = check_download(url)
        print(json.dumps(meta, indent=2))
        if meta['duration'] > 5 * 60:
            raise Exception("Video is too long, please use videos shorter than 5 minutes")
        yield {"message": f"Found video with {meta['duration']} seconds duration from {meta['extractor']}",
               "meta": meta}
        tempdir = output_dir / f"{meta['id']}"
    except Exception as e:
        yield {"message": f"{e}"}
        return

    ### Step 2: download the video and extract the audio
    yield {"message": f"Starting download with URL {url}, this may take a while"}
    try:
        meta, video, audio = download(url, tempdir)
        yield {"message": "Downloaded video and extracted audio",
               "video": video, "audio": audio, "meta": meta}
    except Exception as e:
        os.chdir(original_dir)
        yield {"message": f"{e}"}
        raise e

    ### Step 3: transcribe and translate with Whisper
    yield {"message": f"[PLEASE WAIT] Starting whisper transcribe with {meta['id']}.mp3"}
    try:
        whisper_result = transcribe(audio)
        srt_path = tempdir / f"{meta['id']}.srt"
        with open(srt_path, "w", encoding="utf-8") as srt:
            write_srt(whisper_result["segments"], file=srt)
        whisper_result["srt"] = Path(srt_path).read_text()
        yield {"message": "Transcribe successful",
               "whisper_result": whisper_result, "meta": meta}
    except Exception as e:
        os.chdir(original_dir)
        yield {"message": f"{e}"}
        raise e

    # TODO: add a return here so users can continue after editing the subtitles

    ### Step 4: bake the subtitles into the video with ffmpeg
    yield {"message": "[PLEASE WAIT] Baking subtitles into video"}
    try:
        subbed_video_path = tempdir / f"{meta['id']}_translated.mp4"
        fontsdir = Path('fonts')
        bake_subs(video, subbed_video_path.absolute(), srt_path.absolute(), fontsdir)
        yield {"message": "Subtitled video ready!",
               "sub_video": str(subbed_video_path.absolute()), "meta": meta}
    except ffmpeg.Error as e:
        # ffmpeg.Error carries the captured stdout/stderr of the failed ffmpeg run
        print('stdout:', e.stdout.decode('utf8'))
        print('stderr:', e.stderr.decode('utf8'))
        os.chdir(original_dir)
        yield {"message": f"{e}"}
        raise e
    except Exception as e:
        os.chdir(original_dir)
        print('error', file=sys.stderr)
        yield {"message": f"{e}"}
        raise e


def progress_hook(d):
    # youtube_dl calls this hook for its side effects only, so report progress with print()
    # (the original yield statements made this a generator whose body never ran when the hook fired)
    if d['status'] == 'downloading':
        print("downloading " + str(round(float(d['downloaded_bytes']) / float(d['total_bytes']) * 100, 1)) + "%")
    if d['status'] == 'finished':
        filename = d['filename']
        print(f"Downloaded {filename}")


def download(url, tempdir):
    try:
        ydl_opts = {
            "format": "bestvideo[ext=mp4]+bestaudio/best",
            "keepvideo": True,
            "postprocessors": [{
                "key": "FFmpegExtractAudio",
                "preferredcodec": "mp3",
                "preferredquality": "192",
            }],
            "skip_download": False,
            "outtmpl": f"{tempdir}/%(id)s.%(ext)s",
            "noplaylist": True,
            "verbose": False,
            "quiet": True,
            "progress_hooks": [progress_hook],
        }
        ydl = YoutubeDL(ydl_opts)
        meta = ydl.extract_info(
            url,
            download=True,
        )
    except DownloadError as e:
        raise e
    else:
        video = tempdir / f"{meta['id']}.{meta['ext']}"
        audio = tempdir / f"{meta['id']}.mp3"
        print(str(video.resolve()))
        return meta, str(video.resolve()), str(audio.resolve())


def check_download(url):
    ydl_opts = {
        "format": "bestvideo[ext=mp4]+bestaudio/best",
        "skip_download": True,
        "verbose": False,
    }
    ydl = YoutubeDL(ydl_opts)
    try:
        meta = ydl.extract_info(
            url,
            download=False,
        )
    except DownloadError as e:
        raise e
    else:
        return meta


def transcribe(audio):
    print('Starting transcribe...')
    global model
    if not preload_model:
        model = whisper.load_model(model_size)
    output = model.transcribe(audio, task="translate")
    output["language"] = LANGUAGES[output["language"]]
    output['segments'] = [{"id": 0, "seek": 0, "start": 0.0, "end": 3,
                           "text": " [AI translation by @vidtranslator]"}] + output['segments']
    print(f'Finished transcribe from {output["language"]}', output["text"])
    return output
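

# Minimal local usage sketch (not part of the original module): assuming this file is run
# directly with the helpers above importable, the generator can be consumed in a plain loop
# to drive a full download/translate run from the command line. The URL below is a
# hypothetical placeholder, not a value taken from the project.
if __name__ == "__main__":
    demo_url = "https://www.youtube.com/watch?v=EXAMPLE_ID"  # placeholder, replace with a real video URL
    for update in download_generator(demo_url):
        # Each update is a dict with at least a "message" key; later steps may also carry
        # "meta", "video", "audio", "whisper_result", or "sub_video" entries.
        print(update["message"])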