yoru_tomosu committed
Commit 346ee80
1 Parent(s): d8f80e1

Create app.py

Files changed (1): app.py +47 -0
app.py ADDED
@@ -0,0 +1,47 @@
+ # Notebook-style dependency installs (`!pip` magics are only valid inside a
+ # notebook, so these lines are kept as comments in app.py):
+ # !pip install -q git+https://github.com/openai/whisper.git
+ # !pip install -q gradio
+ # !pip install -q deepl
+ # !pip install -q requests
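+ # On a Hugging Face Space the same dependencies would normally be declared in
+ # a requirements.txt next to app.py, e.g.:
+ #   git+https://github.com/openai/whisper.git
+ #   gradio
+ #   deepl
+ #   requests
+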
+ import whisper
+
+ model = whisper.load_model("base")
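+ # "base" is one of the smaller Whisper checkpoints; "small", "medium", or
+ # "large" can be swapped in for better accuracy at the cost of speed.
+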
+ import os
+ import deepl
+
+ # DeepL auth key; this Space stores it in a secret named "ElevenLabs_API".
+ deepl_auth_key = os.environ["ElevenLabs_API"]
+
+ def translate(text, target_lang):
+     translator = deepl.Translator(deepl_auth_key)
+     # translate_text returns a TextResult object; return its plain string.
+     result = translator.translate_text(text, target_lang=target_lang)
+     return result.text
+
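+ # Example usage (assuming a valid DeepL key is configured):
+ #   translate("Hello, world", "JA")  # -> the Japanese translation as a str
+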
+ def transcribe(audio):
+
+     # load audio and pad/trim it to fit 30 seconds
+     audio = whisper.load_audio(audio)
+     audio = whisper.pad_or_trim(audio)
+
+     # make log-Mel spectrogram and move to the same device as the model
+     mel = whisper.log_mel_spectrogram(audio).to(model.device)
+
+     # detect the spoken language
+     _, probs = model.detect_language(mel)
+     detect_lang = max(probs, key=probs.get)
+     print(f"Detected language: {detect_lang}")
+
+     # decode the audio (DecodingOptions(fp16=False) would avoid the FP16
+     # fallback warning on CPU-only hardware)
+     options = whisper.DecodingOptions()
+     result = whisper.decode(model, mel, options)
+
+     # translate the transcription into Japanese
+     translated_text = translate(result.text, "JA")
+     return translated_text
+
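+ # gr.Video passes transcribe() a file path, and whisper.load_audio decodes it
+ # through ffmpeg, so the video container is handled directly; pad_or_trim
+ # above means only the first 30 seconds are transcribed.
+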
+ import gradio as gr
+
+ title = 'Video Translator'
+
+ inputs = gr.Video()
+ outputs = gr.Text()
+ interface = gr.Interface(title=title, fn=transcribe, inputs=inputs, outputs=outputs)
+ interface.launch(debug=True)
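+ # debug=True surfaces tracebacks in the console; adding share=True would also
+ # create a temporary public link when running outside of a Space.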