import time

import gradio as gr
import librosa
import numpy as np
from transformers import pipeline

# Hindi ASR model from AI4Bharat, loaded through the transformers pipeline API.
pipe2 = pipeline("automatic-speech-recognition", model="ai4bharat/indicwav2vec-hindi")


def resample_to_16k(audio, orig_sr):
    # Wav2Vec2-style models expect 16 kHz input, so resample whatever rate
    # the browser/microphone delivers to that target rate.
    y_resampled = librosa.resample(y=audio, orig_sr=orig_sr, target_sr=16000)
    return y_resampled


def transcribe(audio):
    # Gradio's numpy audio format is a (sample_rate, samples) tuple.
    sr, y = audio
    y = y.astype(np.float32)
    y /= np.max(np.abs(y))  # peak-normalize to [-1, 1]
    y_resampled = resample_to_16k(y, sr)
    trans = pipe2(y_resampled)
    return trans["text"]


demo = gr.Interface(
    transcribe,
    inputs="microphone",
    outputs="text",
    examples=[
        "./Samples/Hindi_1.mp3",
        "./Samples/Hindi_2.mp3",
        "./Samples/Hindi_3.mp3",
        "./Samples/Hindi_4.mp3",
        "./Samples/Hindi_5.mp3",
    ],
)

demo.launch()
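As an optional sanity check, transcribe can also be called directly on one of the bundled samples before launching the interface (a minimal sketch, assuming the ./Samples/*.mp3 files listed under examples exist locally and librosa's audio backend can decode mp3):

# Load a sample at its native rate and push it through the same preprocessing path.
sample, sample_sr = librosa.load("./Samples/Hindi_1.mp3", sr=None)
print(transcribe((sample_sr, sample)))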