# asr-inference / app.py

import spaces
import torch
import gradio as gr
from AinaTheme import theme
from transformers import pipeline
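
# Whisper large-v3 checkpoint fine-tuned by projecte-aina; the model id
# suggests Catalan/Spanish data with synthetic code-switching.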
MODEL_NAME = "projecte-aina/whisper-large-v3-ca-es-synth-cs"
BATCH_SIZE = 8

# Use the first CUDA GPU (device index 0) when available, else fall back to
# CPU; transformers pipelines accept an int device index or the string "cpu".
device = 0 if torch.cuda.is_available() else "cpu"

pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,  # split long audio into 30-second chunks
    device=device,
)
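
# Minimal usage sketch (hypothetical file path, not executed by the app):
#   pipe("sample.wav", generate_kwargs={"task": "transcribe"})["text"]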


# @spaces.GPU requests GPU time on Hugging Face ZeroGPU Spaces for this call.
@spaces.GPU
def transcribe(inputs):
    if inputs is None:
        raise gr.Error(
            "No audio file submitted! Please upload a file or record "
            "audio before sending your request."
        )
    text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": "transcribe"}, return_timestamps=True)["text"]
    return text
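
# With return_timestamps=True the pipeline output also carries a "chunks"
# list of timestamped segments; only the joined "text" field is returned here.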

description_string = (
    "Automatic transcription of microphone input or audio files.\n "
    "This demo was developed to test speech recognition models for mobile devices. For now it uses the checkpoint "
    f"[{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and the 🤗 Transformers library for transcription."
)


def clear():
    return None


with gr.Blocks(theme=theme) as demo:
    gr.Markdown(description_string)
    with gr.Row():
        with gr.Column(scale=1):
            # Microphone capture is disabled for now; upload only.
            # audio_input = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Audio")
            audio_input = gr.Audio(sources=["upload"], type="filepath", label="Audio")
        with gr.Column(scale=1):
            output = gr.Textbox(label="Output", lines=8)
    with gr.Row(variant="panel"):
        clear_btn = gr.Button("Clear")
        submit_btn = gr.Button("Submit", variant="primary")
    submit_btn.click(fn=transcribe, inputs=[audio_input], outputs=[output])
    clear_btn.click(fn=clear, inputs=[], outputs=[audio_input], queue=False)
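
# Note: queue=False (standard Gradio option) lets the Clear action run
# immediately instead of waiting behind queued transcription jobs.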

if __name__ == "__main__":
    demo.launch()
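
# Local-run note (assumption: running outside a Space): launch() serves on a
# local port by default; demo.launch(share=True), a standard Gradio option,
# would additionally create a temporary public link.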