import whisper
import deepl
import os

# load the base Whisper speech-recognition model
model = whisper.load_model("base")

# read the DeepL auth key from the environment
deepl_auth_key = os.environ["Deepl_API"]
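# Note: the key is expected in an environment variable named "Deepl_API"
# (for example, set as a secret on the hosting platform, or locally with
#   export Deepl_API=<your DeepL auth key>
# before launching the app).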
def translate(text, target_lang):
    # translate text into the target language with the DeepL API
    translator = deepl.Translator(deepl_auth_key)
    translated_text = translator.translate_text(text, target_lang=target_lang)
    # translate_text returns a TextResult; return the plain string for display
    return translated_text.text
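# Illustrative use (assumes a valid DeepL key is configured):
#   translate("Hello, world!", "JA")   # -> the Japanese translation as a string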
def transcribe(audio):
    # load audio and pad/trim it to fit 30 seconds
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    # make log-Mel spectrogram and move to the same device as the model
    mel = whisper.log_mel_spectrogram(audio).to(model.device)

    # detect the spoken language
    _, probs = model.detect_language(mel)
    print(f"Detected language: {max(probs, key=probs.get)}")
    detect_lang = max(probs, key=probs.get)

    # decode the audio (fp16=False so decoding also works on CPU)
    # options = whisper.DecodingOptions()
    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(model, mel, options)

    # if detect_lang == "en":
    #     print("Text: ", result.text)
    #     translated_text = translate(result.text, "JA")
    #     print("translated_text: ", translated_text)
    #     generated_video = text_to_speech(translated_text)
    #     print("generated_video 01: ", generated_video)
    # elif detect_lang == "ja":
    #     print("Text: ", result.text)
    #     translated_text = translate(result.text, "EN-US")

    # translate the transcription into Japanese
    # (detect_lang is currently unused; the active path always targets "JA")
    translated_text = translate(result.text, "JA")
    return translated_text
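# Gradio passes the uploaded video to transcribe() as a file path;
# whisper.load_audio invokes ffmpeg, which reads the audio track
# straight from the video file.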
import gradio as gr

title = 'Translator_Video'
inputs = gr.Video()
outputs = gr.Text()

interface = gr.Interface(title=title, fn=transcribe, inputs=inputs, outputs=outputs)
interface.launch(debug=True)
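# For a temporary public link when running outside the hosted Space,
# Gradio also accepts: interface.launch(debug=True, share=True)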