"""Gradio app for collecting read-speech recordings.

Shows one prompt line from ``prompt.txt`` at a time; the user records
audio for it, then saves the (text, audio) pair to a Hugging Face
dataset via ``HuggingFaceDatasetSaver``. Next/Previous buttons page
through the prompt list.
"""

import os
import warnings
import re

import gradio as gr
from transformers import pipeline

# Initialize the speech recognition pipeline and transliterator
#p1 = pipeline(task="automatic-speech-recognition", model="cdactvm/w2v-bert-2.0-odia_v1")
#p2 = pipeline(task="automatic-speech-recognition", model="cdactvm/w2v-bert-2.0-hindi_v1")

# NOTE(review): the env var is spelled 'HW_TOKEN' in the original; this looks
# like a typo for 'HF_TOKEN' -- confirm which name the deployment actually sets.
HF_TOKEN = os.getenv('HW_TOKEN')
hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "save_audio")

# Index of the prompt line currently shown in the textbox.
cur_line = 0


def readFile():
    """Read prompt.txt and return ``(line_count, lines)``.

    Returns:
        tuple[int, list[str]]: number of prompt lines and the raw lines
        (newlines preserved, as ``readlines`` returns them).
    """
    # `with` guarantees the file handle is closed (the original leaked it).
    with open('prompt.txt') as f:
        lines = f.readlines()
    return len(lines), lines


totlines, file_content = readFile()
callback = gr.CSVLogger()


def readPromt():
    """Return the prompt currently selected (initial textbox value).

    The original pre-incremented ``cur_line`` here, which permanently
    skipped the first line of prompt.txt; now it simply reads the
    current index.
    """
    print(cur_line)
    return file_content[cur_line]


def readNext():
    """Advance to the next prompt, clamped at the last line.

    Returns:
        list: ``[prompt_text, None]`` -- the new prompt plus ``None`` to
        clear the audio widget.
    """
    global cur_line
    if cur_line < totlines - 1:  # don't run past the end of the file
        cur_line += 1
    print(cur_line)
    return [file_content[cur_line], None]


def readPrevious():
    """Go back to the previous prompt, clamped at the first line.

    Returns:
        list: ``[prompt_text, None]`` -- the new prompt plus ``None`` to
        clear the audio widget.
    """
    global cur_line
    if cur_line > 0:  # don't go before the first line
        cur_line -= 1
    print(cur_line)
    return [file_content[cur_line], None]


demo = gr.Blocks()
with demo:
    text = gr.Textbox(readPromt())
    upfile = gr.Audio(
        sources=["microphone", "upload"],
        type="filepath",
        label="Record",
    )
    with gr.Row():
        b1 = gr.Button("Save")
        b2 = gr.Button("Next")
        b3 = gr.Button("Previous")

    b2.click(readNext, inputs=None, outputs=[text, upfile])
    b3.click(readPrevious, inputs=None, outputs=[text, upfile])

    # A flagging callback must be set up with the components it will log
    # before .flag() can be called (the original left this commented out).
    hf_writer.setup([text, upfile], "flagged_data_points")
    # The original lambda returned the saver object without invoking it,
    # so nothing was ever saved; actually flag the (text, audio) pair.
    b1.click(
        lambda *args: hf_writer.flag(args),
        [text, upfile],
        None,
        preprocess=False,
    )

demo.launch()