import gradio as gr
import nemo.collections.asr as nemo_asr
from pydub import AudioSegment
import pyaudioconvert as pac
import timeit

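# Load the pretrained Kinyarwanda Conformer (RNNT) speech-to-text model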
hf_model = nemo_asr.models.EncDecRNNTBPEModel.from_pretrained(
                model_name="mbazaNLP/Kinyarwanda_nemo_stt_conformer_model")

def convert(audio):
  # Normalize the upload to 16-bit mono WAV in place; return False for unsupported formats
  file_name = audio.name
  if file_name.endswith(("mp3", "wav", "ogg")):
    if file_name.endswith("mp3"):
      sound = AudioSegment.from_mp3(audio.name)
      sound.export(audio.name, format="wav")
    elif file_name.endswith("ogg"):
      sound = AudioSegment.from_ogg(audio.name)
      sound.export(audio.name, format="wav")
  else:
    return False
  pac.convert_wav_to_16bit_mono(audio.name, audio.name)
  return True

def transcribe(audio, mic_audio=None):
  # The interface has two optional audio inputs; prefer the uploaded file, fall back to the mic recording
  if audio is None:
    audio = mic_audio
  if audio is None:
    return "Please upload an audio file or record one with the microphone"
  start = timeit.default_timer()
  if not convert(audio):
    return "The format must be mp3, wav or ogg"

  files = [audio.name]
  print(audio.name)
  for fname, transcription in zip(files, hf_model.transcribe(paths2audio_files=files)):
    stop = timeit.default_timer()
    return ("message: " + transcription[0]
            + "\nfilename: " + fname
            + "\ntranscription time: " + str(stop - start))

gradio_ui = gr.Interface(
    fn=transcribe,
    title="Kinyarwanda Speech Recognition",
    description="Upload an audio clip or record from the browser microphone, and let AI do the hard work of transcribing.",
    article="""
    This demo showcases the pretrained Kinyarwanda Conformer model from NVIDIA NeMo.
    """,
    inputs=[
        gr.inputs.Audio(label="Upload Audio File", type="file", optional=True),
        gr.inputs.Audio(source="microphone", type="file", optional=True, label="Record from microphone"),
    ],
    outputs=[gr.outputs.Textbox(label="Recognized speech")],
)
gradio_ui.launch()