import gradio as gr
from transformers import AutoProcessor, WhisperForConditionalGeneration
from transformers.pipelines.audio_utils import ffmpeg_read

# Fine-tuned Whisper-tiny checkpoint for Georgian speech recognition.
model_name = "GiorgiSekhniashvili/whisper-tiny-ka-01"

# Whisper models are trained on 16 kHz audio; ffmpeg_read resamples to this rate.
SAMPLING_RATE = 16_000

processor = AutoProcessor.from_pretrained(model_name)
model = WhisperForConditionalGeneration.from_pretrained(model_name)

# Pin the decoder prompt so generation always transcribes (not translates)
# and always assumes Georgian, regardless of what the audio sounds like.
forced_decoder_ids = processor.get_decoder_prompt_ids(
    language="Georgian", task="transcribe"
)


def predict(audio_path):
    """Transcribe a Georgian speech recording to text.

    Parameters
    ----------
    audio_path : str or None
        Path to an audio file recorded by the Gradio microphone widget.
        May be None/empty when the user submits without recording.

    Returns
    -------
    str
        The transcription, or an empty string when no audio was provided.
    """
    # Guard clause: nothing recorded yet. Returning "" (instead of the
    # original implicit None) gives Gradio a well-defined empty textbox.
    if not audio_path:
        return ""

    # Decode the container (wav/webm/...) to a float32 waveform via ffmpeg.
    with open(audio_path, "rb") as f:
        waveform = ffmpeg_read(f.read(), sampling_rate=SAMPLING_RATE)

    # Convert the raw waveform to log-mel input features for Whisper.
    inputs = processor(waveform, sampling_rate=SAMPLING_RATE, return_tensors="pt")

    # max_new_tokens=448 is Whisper's maximum target length per segment.
    generated_ids = model.generate(
        inputs["input_features"],
        forced_decoder_ids=forced_decoder_ids,
        max_new_tokens=448,
    )

    # batch_decode returns a list (batch of 1); unwrap the single transcript.
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]


mic = gr.Audio(source="microphone", type="filepath", label="Speak here...")
demo = gr.Interface(predict, mic, "text")
demo.launch()