|
import warnings

warnings.filterwarnings("ignore")

import os
import re

import librosa
import webrtcvad
import nbimporter
import torchaudio
import numpy as np
import gradio as gr
import scipy.signal
import soundfile as sf
from transformers import pipeline, AutoProcessor, Wav2Vec2ProcessorWithLM
from pyctcdecode import build_ctcdecoder

# Local helper modules: text normalisation and audio pre-processing.
from text2int import text_to_int
from isNumber import is_number
from Text2List import text_to_list
from convert2list import convert_to_list
from processDoubles import process_doubles
from replaceWords import replace_words
from applyVad import apply_vad
from wienerFilter import wiener_filter
from highPassFilter import high_pass_filter
from wavletDenoise import wavelet_denoise
|
# ASR pipelines: the new fine-tuned Hindi model and the older baseline.
transcriber_hindi_new = pipeline(task="automatic-speech-recognition", model="cdactvm/w2v-bert-2.0-hindi_new")
transcriber_hindi_old = pipeline(task="automatic-speech-recognition", model="cdactvm/huggingface-hindi_model")

# Build a CTC beam-search decoder backed by a KenLM language model (lm.binary),
# then wrap it together with the model's tokenizer and feature extractor.
processor = AutoProcessor.from_pretrained("cdactvm/w2v-bert-2.0-hindi_new")
vocab_dict = processor.tokenizer.get_vocab()
sorted_vocab_dict = {k.lower(): v for k, v in sorted(vocab_dict.items(), key=lambda item: item[1])}
decoder = build_ctcdecoder(
    labels=list(sorted_vocab_dict.keys()),
    kenlm_model_path="lm.binary",
)
processor_with_lm = Wav2Vec2ProcessorWithLM(
    feature_extractor=processor.feature_extractor,
    tokenizer=processor.tokenizer,
    decoder=decoder,
)
processor.feature_extractor._processor_class = "Wav2Vec2ProcessorWithLM"
transcriber_hindi_lm = pipeline(
    "automatic-speech-recognition",
    model="cdactvm/w2v-bert-2.0-hindi_new",
    tokenizer=processor_with_lm,
    feature_extractor=processor_with_lm.feature_extractor,
    decoder=processor_with_lm.decoder,
)
|
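# For reference, a minimal sketch (an assumption, not used by the app) of doing
# the same LM-rescored decoding by hand instead of through the pipeline:
#
#   import torch
#   from transformers import AutoModelForCTC
#
#   model = AutoModelForCTC.from_pretrained("cdactvm/w2v-bert-2.0-hindi_new")
#   # audio_array: 1-D float32 numpy array sampled at 16 kHz
#   inputs = processor(audio_array, sampling_rate=16000, return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits
#   text = processor_with_lm.batch_decode(logits.numpy()).text[0]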
|
def postprocess_transcript(text):
    # Shared post-processing: collapse doubled words, apply the word
    # replacement map, then convert spoken number words to digits.
    processed_doubles = process_doubles(text)
    replaced_words = replace_words(processed_doubles)
    return text_to_int(replaced_words)


def transcribe_hindi_new(audio):
    # Transcribe with the new fine-tuned model.
    transcript = transcriber_hindi_new(audio)
    return postprocess_transcript(transcript['text'])


def transcribe_hindi_lm(audio):
    # Transcribe with the new model plus KenLM beam-search decoding.
    transcript = transcriber_hindi_lm(audio)
    return postprocess_transcript(transcript['text'])


def transcribe_hindi_old(audio):
    # Transcribe with the older baseline model; strip stray <s> tokens first.
    transcript = transcriber_hindi_old(audio)
    cleaned_text = transcript['text'].replace("<s>", "")
    return postprocess_transcript(cleaned_text)
|
def apply_wiener_filter(audio):
    # Wiener filtering for broadband noise reduction (local wienerFilter module).
    return wiener_filter(audio)
|
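# Note (assumption): scipy.signal.wiener would be a drop-in alternative if the
# local helper were unavailable, e.g. `return scipy.signal.wiener(audio)`.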
|
def noise_cancellation(audio_file):
    # Load audio at 16 kHz, the rate the acoustic models expect.
    audio, sr = librosa.load(audio_file, sr=16000)

    # Denoising chain: high-pass filter, then Wiener filter, then
    # wavelet-based denoising.
    audio = high_pass_filter(audio, sr)
    audio = apply_wiener_filter(audio)
    denoised_audio = wavelet_denoise(audio)

    # Persist the denoised signal so the ASR pipeline can read it from disk.
    temp_wav = "temp_denoised.wav"
    sf.write(temp_wav, denoised_audio, sr)

    transcript = transcriber_hindi_lm(temp_wav)
    cleaned_text = transcript['text'].replace("<s>", "")
    return postprocess_transcript(cleaned_text)
|
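# Optional pre-processing (a sketch, not wired into the app): the imported
# apply_vad helper could trim silence before denoising. Its exact signature
# lives in applyVad.py; the call below assumes apply_vad(audio, sr).
#
#   audio = apply_vad(audio, sr)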
|
def sel_lng(lng, mic=None, file=None):
    # Prefer the microphone recording; fall back to an uploaded file.
    if mic is not None:
        audio = mic
    elif file is not None:
        audio = file
    else:
        return "You must either provide a mic recording or a file"

    # Dispatch to the selected model.
    if lng == "model_1":
        return transcribe_hindi_old(audio)
    elif lng == "model_2":
        return transcribe_hindi_new(audio)
    elif lng == "model_3":
        return transcribe_hindi_lm(audio)
    elif lng == "model_4":
        return noise_cancellation(audio)
    return "Unknown model selection"
|
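# Quick local sanity check, bypassing the UI (assumption: a 16 kHz mono WAV
# named sample.wav sits next to this script):
#
#   print(sel_lng("model_2", file="sample.wav"))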
|
demo = gr.Interface(
    fn=sel_lng,
    inputs=[
        gr.Dropdown(
            ["model_1", "model_2", "model_3", "model_4"],
            label="Select Model",
        ),
        gr.Audio(sources=["microphone", "upload"], type="filepath"),
    ],
    outputs=["textbox"],
    title="Automatic Speech Recognition",
    description=(
        "Demo for automatic speech recognition. Use the microphone to record "
        "speech or upload an audio file. The models may take some time to load "
        "on first use. The recognised text appears in the output textbox."
    ),
).launch()
|
|
|
|